| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–53.2k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
torch.set_grad_enabled(False)
SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowercase_ ( __A : str , __A : Union[str, Any]=1_0_0 , __A : Optional[int]=" " ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] =text.split(__A )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__A ) , __A )]
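# For illustration (a hedged reading of the obfuscated signature above, which
# un-obfuscated is split_text(text, n=100, character=" ")): splitting the
# string "a b c d e" with n=2 yields ["a b", "c d", "e"], i.e. passages of at
# most n whitespace-separated words each.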
def lowercase_ ( __A : dict ) -> dict:
"""simple docstring"""
lowercase , lowercase : str =[], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(__A ):
titles.append(title if title is not None else '''''' )
texts.append(__A )
return {"title": titles, "text": texts}
def lowercase_ ( __A : dict , __A : DPRContextEncoder , __A : DPRContextEncoderTokenizerFast ) -> dict:
"""simple docstring"""
lowercase : str =ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=__A , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
lowercase : Dict =ctx_encoder(input_ids.to(device=__A ) , return_dict=__A ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( __A : "RagExampleArguments" , __A : "ProcessingArguments" , __A : "IndexHnswArguments" , ) -> Optional[int]:
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase : Optional[int] =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase : Dict =dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase : List[Any] =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A )
lowercase : Tuple =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase : str =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase : List[Any] =dataset.map(
partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , )
# And finally save your dataset
lowercase : Optional[int] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase : str =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__A )
# And save the index
lowercase : List[str] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default=str(Path(__A ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
UpperCamelCase_ = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
UpperCamelCase_ = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
UpperCamelCase_ = field(
default=str(Path(__A ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default=__A , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
UpperCamelCase_ = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
UpperCamelCase_ = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
SCREAMING_SNAKE_CASE = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| code_codestyle: 94 |
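# A hedged sketch (not part of the script above) of consuming its artifacts:
# reload the saved dataset and FAISS index, embed a question with the paired
# DPR question encoder, and retrieve the nearest passages. The `output_dir`
# path is an assumption; the question encoder pairs with the multiset context
# encoder the script uses by default.
import os
import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

output_dir = "path/to/output_dir"  # hypothetical: wherever the script wrote to
dataset = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))
dataset.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
with torch.no_grad():
    question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output
scores, passages = dataset.get_nearest_examples("embeddings", question_emb[0].numpy(), k=5)
print(list(zip(scores, passages["title"])))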
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| style_context_codestyle: 319 | label: 0 |
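# A minimal standalone sketch (an assumed simplification, not the library's
# actual implementation) of the optional-backend guard pattern the __init__
# above relies on: heavy backends are only imported when they are installed.
import importlib.util

def backend_available(name: str) -> bool:
    # the real is_torch_available()/is_flax_available() helpers add version
    # checks and env-var overrides on top of this spec lookup
    return importlib.util.find_spec(name) is not None

if backend_available("torch"):
    import torch  # torch-backed model classes would be imported here
if backend_available("flax"):
    import flax  # flax-backed model classes would be imported here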
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCAmelCase , unittest.TestCase ):
__magic_name__ = SpeechTaTokenizer
__magic_name__ = False
__magic_name__ = True
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a : List[Any] = SpeechTaTokenizer(A )
a : str = AddedToken('<mask>' , lstrip=A , rstrip=A )
a : List[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] , A : Any ):
'''simple docstring'''
a : Optional[int] = 'this is a test'
a : Union[str, Any] = 'this is a test'
return input_text, output_text
def lowerCamelCase__ ( self : Union[str, Any] , A : List[Any] , A : List[str]=False , A : int=2_0 , A : str=5 ):
'''simple docstring'''
a : int = self.get_input_output_texts(A )
a : Any = tokenizer.encode(A , add_special_tokens=A )
a : Union[str, Any] = tokenizer.decode(A , clean_up_tokenization_spaces=A )
return text, ids
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : str = '<pad>'
a : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(A ) , 8_1 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Dict = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a : Dict = tokenizer.vocab_size
a : Dict = len(A )
self.assertNotEqual(A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a : Tuple = ['aaaaa bbbbbb', 'cccccccccdddddddd']
a : str = tokenizer.add_tokens(A )
a : Optional[int] = tokenizer.vocab_size
a : Dict = len(A )
self.assertNotEqual(A , 0 )
self.assertEqual(A , A )
self.assertEqual(A , len(A ) )
self.assertEqual(A , all_size + len(A ) )
a : Union[str, Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A )
self.assertGreaterEqual(len(A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a : Dict = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a : Optional[Any] = tokenizer.add_special_tokens(A )
a : int = tokenizer.vocab_size
a : List[Any] = len(A )
self.assertNotEqual(A , 0 )
self.assertEqual(A , A )
self.assertEqual(A , len(A ) )
self.assertEqual(A , all_size_a + len(A ) )
a : List[str] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A )
self.assertGreaterEqual(len(A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Tuple = self.get_tokenizer()
a : Tuple = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(A , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
a : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
a : Optional[int] = tokenizer.convert_tokens_to_ids(A )
# fmt: off
self.assertListEqual(A , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
a : List[Any] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : str = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a : int = {
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=A , )
| code_codestyle: 718 |
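# A hedged usage sketch of the tokenizer exercised above. In the un-obfuscated
# transformers API the class is SpeechT5Tokenizer; the checkpoint is the one
# the integration test pins.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
tokens = tokenizer.tokenize("This is a test")
print(tokens)  # character-level pieces; words are prefixed with the '▁' marker
print(tokenizer.convert_tokens_to_ids(tokens))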
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case ( UpperCAmelCase ):
__magic_name__ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
__magic_name__ = '''CIDAS/clipseg-rd64-refined'''
__magic_name__ = '''image_segmenter'''
__magic_name__ = CLIPSegForImageSegmentation
__magic_name__ = ['''image''', '''text''']
__magic_name__ = ['''image''']
def __init__( self : str , *A : Any , **A : Any ):
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*A , **A )
def lowerCamelCase__ ( self : Any , A : "Image" , A : str ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=A , return_tensors='pt' )
def lowerCamelCase__ ( self : str , A : List[Any] ):
'''simple docstring'''
with torch.no_grad():
a : Optional[Any] = self.model(**A ).logits
return logits
def lowerCamelCase__ ( self : List[Any] , A : List[str] ):
'''simple docstring'''
a : List[str] = outputs.cpu().detach().numpy()
a : List[Any] = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
| style_context_codestyle: 118 | label: 0 |
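# A hedged standalone sketch of what the tool above does end to end, written
# against the public CLIPSeg API and the checkpoint the tool names; the input
# image path is an assumption.
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("photo.png")  # hypothetical input image
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = (logits > 0).numpy().astype("uint8") * 255  # binarize the logits as the tool does
Image.fromarray(mask).save("mask.png")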
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| code_codestyle: 91 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : Any = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ['''MaskFormerFeatureExtractor''']
_UpperCAmelCase : str = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
_UpperCAmelCase : int = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| style_context_codestyle: 107 | label: 0 |
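# A minimal standalone illustration (an assumed simplification of the real
# transformers._LazyModule) of the lazy-import pattern both init files above
# use: the import structure is declared eagerly, but each submodule is only
# imported on first attribute access and then cached.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_submodule[attr]
        value = getattr(importlib.import_module("." + submodule, self.__name__), attr)
        setattr(self, attr, value)  # cache so each submodule import runs once
        return value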
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
a__ : str = parser.parse_args()
if args.model_type == "roberta":
a__ : List[str] = RobertaForMaskedLM.from_pretrained(args.model_name)
a__ : Optional[int] = """roberta"""
elif args.model_type == "gpt2":
a__ : Dict = GPTaLMHeadModel.from_pretrained(args.model_name)
a__ : int = """transformer"""
a__ : Optional[int] = model.state_dict()
a__ : Optional[Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a__ : List[Any] = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a__ : Optional[int] = f'''{prefix}.embeddings.{w}.weight'''
a__ : List[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
a__ : Any = f'''{prefix}.embeddings.LayerNorm.{w}'''
a__ : str = state_dict[param_name]
# Transformer Blocks #
a__ : Any = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a__ : str = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a__ : Optional[int] = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a__ : Union[str, Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a__ : Dict = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a__ : Optional[Any] = state_dict[f'''lm_head.dense.{w}''']
a__ : int = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a__ : Tuple = state_dict[f'''{prefix}.ln_f.{w}''']
a__ : List[str] = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| code_codestyle: 713 |
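# A hedged sketch (the 6-layer student depth and strict=False loading are
# assumptions) of consuming the checkpoint dumped above to initialize a student
# model for transfer-learned distillation.
import torch
from transformers import RobertaConfig, RobertaForMaskedLM

student_config = RobertaConfig.from_pretrained("roberta-large", num_hidden_layers=6)
student = RobertaForMaskedLM(student_config)
state_dict = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
missing, unexpected = student.load_state_dict(state_dict, strict=False)
print(f"missing keys: {len(missing)}, unexpected keys: {len(unexpected)}")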
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a__ : Optional[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a__ : int = direct_transformers_import(PATH_TO_TRANSFORMERS)
a__ : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a__ : Optional[Any] = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (even though we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def snake_case (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowerCamelCase__ = True
# Deal with multi-line cases
elif (
re.search(
rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , UpperCamelCase , )
is not None
):
lowerCamelCase__ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCamelCase__ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCamelCase__ = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
lowerCamelCase__ = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
lowerCamelCase__ = True
if not attribute_used:
lowerCamelCase__ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCamelCase__ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCamelCase__ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCamelCase__ = True
elif attribute.endswith("""_token_id""" ):
lowerCamelCase__ = True
# configuration class specific cases
if not case_allowed:
lowerCamelCase__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowerCamelCase__ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def snake_case (UpperCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCamelCase__ = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
lowerCamelCase__ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCamelCase__ = {}
if len(config_class.attribute_map ) > 0:
lowerCamelCase__ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCamelCase__ = inspect.getsourcefile(UpperCamelCase )
lowerCamelCase__ = os.path.dirname(UpperCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCamelCase__ = [os.path.join(UpperCamelCase , UpperCamelCase ) for fn in os.listdir(UpperCamelCase ) if fn.startswith("""modeling_""" )]
# Get the source code strings
lowerCamelCase__ = []
for path in modeling_paths:
if os.path.isfile(UpperCamelCase ):
with open(UpperCamelCase ) as fp:
modeling_sources.append(fp.read() )
lowerCamelCase__ = []
for config_param, default_value in zip(UpperCamelCase , UpperCamelCase ):
# `attributes` here is all the variant names for `config_param`
lowerCamelCase__ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(UpperCamelCase )
def snake_case ():
'''simple docstring'''
lowerCamelCase__ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCamelCase__ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda UpperCamelCase : inspect.isclass(UpperCamelCase )
and issubclass(UpperCamelCase , UpperCamelCase )
and inspect.getmodule(UpperCamelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowerCamelCase__ = check_config_attributes_being_used(UpperCamelCase )
if len(UpperCamelCase ) > 0:
lowerCamelCase__ = unused_attributes
if len(UpperCamelCase ) > 0:
lowerCamelCase__ = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(UpperCamelCase )
if __name__ == "__main__":
check_config_attributes()
| style_context_codestyle: 235 | label: 0 |
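# A small self-contained illustration (dummy class, not from the repo) of the
# signature-introspection step that the checker above performs on every
# configuration class: read __init__ parameters and their defaults.
import inspect

class DummyConfig:
    def __init__(self, hidden_size=768, layer_norm_eps=1e-5, **kwargs):
        self.hidden_size = hidden_size
        self.layer_norm_eps = layer_norm_eps

parameters = dict(inspect.signature(DummyConfig.__init__).parameters)
names = [p for p in parameters if p not in ("self", "kwargs")]
defaults = {p: parameters[p].default for p in names}
print(defaults)  # {'hidden_size': 768, 'layer_norm_eps': 1e-05}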
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_a = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None ) -> List[str]:
"""simple docstring"""
if rng is None:
_UpperCamelCase = random.Random()
_UpperCamelCase = 1
for dim in shape:
total_dims *= dim
_UpperCamelCase = []
for _ in range(__snake_case ):
values.append(rng.randint(0, vocab_size - 1 ) )
_UpperCamelCase = np.array(__snake_case, dtype=jnp.intaa ).reshape(__snake_case )
return output
def lowerCamelCase__ ( __snake_case, __snake_case=None ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = ids_tensor(__snake_case, vocab_size=2, rng=__snake_case )
# make sure that at least one token is attended to for each batch
_UpperCamelCase = 1
return attn_mask
@require_flax
class _UpperCAmelCase:
lowercase__ = None
lowercase__ = ()
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
_UpperCamelCase = 2
_UpperCamelCase = inputs['''input_ids'''].shape[-1] // 2
_UpperCamelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
_UpperCamelCase = jnp.ones_like(__a)
_UpperCamelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_UpperCamelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_UpperCamelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 0
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_UpperCamelCase = getattr(__a , __a)
_UpperCamelCase = pt_model_class(__a).eval()
_UpperCamelCase = load_flax_weights_in_pytorch_model(__a , flax_model.params)
_UpperCamelCase = flax_model.generate(__a).sequences
_UpperCamelCase = pt_model.generate(torch.tensor(__a , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_UpperCamelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = True
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = False
_UpperCamelCase = max_length
_UpperCamelCase = 2
_UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = True
_UpperCamelCase = max_length
_UpperCamelCase = 0.8
_UpperCamelCase = 10
_UpperCamelCase = 0.3
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = max_length
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
_UpperCamelCase = max_length
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 8
_UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = False
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = True
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase = attention_mask.at[(0, 0)].set(0)
_UpperCamelCase = 2
_UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.generate(__a , attention_mask=__a).sequences
self.assertEqual(generation_outputs.shape[-1] , __a)
_UpperCamelCase = jit(model.generate)
_UpperCamelCase = jit_generate(__a , attention_mask=__a).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
_UpperCamelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = tokenizer(__a , return_tensors='''np''').input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__a , '''do_samples'''):
model.generate(__a , do_samples=__a)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__a , '''foo'''):
_UpperCamelCase = {'''foo''': '''bar'''}
model.generate(__a , **__a)
| code_codestyle: 19 |
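# A hedged sketch of the jit-compiled generation pattern the tests above
# exercise repeatedly (the tiny checkpoint name is an assumption, chosen only
# for speed).
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

input_ids = tokenizer("Hello world", return_tensors="np").input_ids
jit_generate = jit(model.generate)  # traced on first call, then reused compiled
sequences = jit_generate(input_ids).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))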
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__a , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __a , __a , __a) -> torch.Tensor:
'''simple docstring'''
_UpperCamelCase = acos(torch.dot(torch.flatten(__a) , torch.flatten(__a)) / torch.norm(__a) / torch.norm(__a))
return sin((1 - alpha) * theta) * xa / sin(__a) + sin(alpha * theta) * xa / sin(__a)
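# A plain-NumPy restatement (for readability, equivalent under the same
# formula) of the spherical linear interpolation implemented by the static
# method directly above.
import numpy as np

def slerp(x1: np.ndarray, x2: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(
        np.dot(x1.ravel(), x2.ravel()) / (np.linalg.norm(x1) * np.linalg.norm(x2))
    )
    return (np.sin((1 - alpha) * theta) * x1 + np.sin(alpha * theta) * x2) / np.sin(theta)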
| style_context_codestyle: 19 | label: 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 42
class __A ( lowerCamelCase__ ,lowerCamelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self , a__ = 6_5536 , a__ = None , a__ = 2 , a__ = 2 , a__ = 0 , a__ = "fourier" , a__ = True , a__ = False , a__ = 0.0 , a__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , a__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , a__ = "UNetMidBlock1D" , a__ = None , a__ = (32, 32, 64) , a__ = None , a__ = 8 , a__ = 1 , a__ = False , ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Any = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=a__ , log=a__ , flip_sin_to_cos=a__)
_lowerCamelCase : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=a__ , downscale_freq_shift=a__)
_lowerCamelCase : Tuple = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase : Any = block_out_channels[0] * 4
_lowerCamelCase : int = TimestepEmbedding(
in_channels=a__ , time_embed_dim=a__ , act_fn=a__ , out_dim=block_out_channels[0] , )
_lowerCamelCase : str = nn.ModuleList([])
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = nn.ModuleList([])
_lowerCamelCase : str = None
# down
_lowerCamelCase : Any = in_channels
for i, down_block_type in enumerate(a__):
_lowerCamelCase : Any = output_channel
_lowerCamelCase : str = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase : str = i == len(a__) - 1
_lowerCamelCase : List[Any] = get_down_block(
a__ , num_layers=a__ , in_channels=a__ , out_channels=a__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(a__)
# mid
_lowerCamelCase : List[Any] = get_mid_block(
a__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=a__ , add_downsample=a__ , )
# up
_lowerCamelCase : str = list(reversed(a__))
_lowerCamelCase : Optional[Any] = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase : Any = out_channels
else:
_lowerCamelCase : Dict = block_out_channels[0]
for i, up_block_type in enumerate(a__):
_lowerCamelCase : int = output_channel
_lowerCamelCase : int = (
reversed_block_out_channels[i + 1] if i < len(a__) - 1 else final_upsample_channels
)
_lowerCamelCase : int = i == len(a__) - 1
_lowerCamelCase : Tuple = get_up_block(
a__ , num_layers=a__ , in_channels=a__ , out_channels=a__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(a__)
_lowerCamelCase : Union[str, Any] = output_channel
# out
_lowerCamelCase : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
_lowerCamelCase : str = get_out_block(
out_block_type=a__ , num_groups_out=a__ , embed_dim=block_out_channels[0] , out_channels=a__ , act_fn=a__ , fc_dim=block_out_channels[-1] // 4 , )
def __snake_case ( self , a__ , a__ , a__ = True , ):
"""simple docstring"""
_lowerCamelCase : List[str] = timestep
if not torch.is_tensor(a__):
_lowerCamelCase : int = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(a__) and len(timesteps.shape) == 0:
_lowerCamelCase : Any = timesteps[None].to(sample.device)
_lowerCamelCase : Union[str, Any] = self.time_proj(a__)
if self.config.use_timestep_embedding:
_lowerCamelCase : str = self.time_mlp(a__)
else:
_lowerCamelCase : Union[str, Any] = timestep_embed[..., None]
_lowerCamelCase : Optional[Any] = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
_lowerCamelCase : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
_lowerCamelCase : Dict = ()
for downsample_block in self.down_blocks:
_lowerCamelCase : Optional[Any] = downsample_block(hidden_states=a__ , temb=a__)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase : Union[str, Any] = self.mid_block(a__ , a__)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
_lowerCamelCase : str = down_block_res_samples[-1:]
_lowerCamelCase : int = down_block_res_samples[:-1]
_lowerCamelCase : Tuple = upsample_block(a__ , res_hidden_states_tuple=a__ , temb=a__)
# 5. post-process
if self.out_block:
_lowerCamelCase : Optional[int] = self.out_block(a__ , a__)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=a__)
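# The "fourier" branch above embeds the scalar timestep with random Fourier
# features before it is broadcast across the sequence. A rough sketch of that
# idea in plain torch (simplified relative to diffusers' GaussianFourierProjection;
# the class and argument names here are illustrative):
import torch
import torch.nn as nn

class FourierTimeEmbedding(nn.Module):
    def __init__(self, embedding_size: int = 8, scale: float = 1.0):
        super().__init__()
        # fixed, non-trainable random projection of the scalar timestep
        self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)

    def forward(self, timesteps: torch.Tensor) -> torch.Tensor:
        # project timesteps to angles, then concatenate sin and cos features
        angles = 2 * torch.pi * timesteps[:, None] * self.weight[None, :]
        return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)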
| 712 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __UpperCAmelCase( lowercase_ ): # picklable for multiprocessing
return x.sum()
def __UpperCAmelCase( lowercase_ ): # picklable for multiprocessing
return i + 1
@dataclass
class __A :
"""simple docstring"""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Optional[int] = [1, 2]
_lowerCamelCase : str = {'''a''': 1, '''b''': 2}
_lowerCamelCase : Dict = {'''a''': [1, 2], '''b''': [3, 4]}
_lowerCamelCase : Any = {'''a''': {'''1''': 1}, '''b''': 2}
_lowerCamelCase : Optional[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
_lowerCamelCase : str = {}
_lowerCamelCase : int = []
_lowerCamelCase : str = 2
_lowerCamelCase : int = [2, 3]
_lowerCamelCase : str = {'''a''': 2, '''b''': 3}
_lowerCamelCase : Tuple = {'''a''': [2, 3], '''b''': [4, 5]}
_lowerCamelCase : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
_lowerCamelCase : str = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
_lowerCamelCase : Dict = 2
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
_lowerCamelCase : Any = {'''a''': np.eye(2), '''b''': np.zeros(3), '''c''': np.ones(2)}
_lowerCamelCase : Optional[int] = {'''a''': 2, '''b''': 0, '''c''': 2}
_lowerCamelCase : Optional[int] = {
'''a''': np.eye(2).astype(a__),
'''b''': np.zeros(3).astype(a__),
'''c''': np.ones(2).astype(a__),
}
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__) , a__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__) , a__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a__): # can't pickle a local lambda
map_nested(lambda a__: x + 1 , a__ , num_proc=a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = {'''a''': 1, '''b''': 2}
_lowerCamelCase : Optional[int] = {'''a''': 3, '''b''': 4}
_lowerCamelCase : int = {'''a''': 5, '''b''': 6}
_lowerCamelCase : Optional[int] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))])
self.assertEqual(sorted(zip_dict(a__ , a__ , a__)) , a__)
def __snake_case ( self):
"""simple docstring"""
class __A :
"""simple docstring"""
UpperCAmelCase__ = """bar"""
_lowerCamelCase : Any = Foo()
self.assertEqual(foo.my_attr , '''bar''')
with temporary_assignment(a__ , '''my_attr''' , '''BAR'''):
self.assertEqual(foo.my_attr , '''BAR''')
self.assertEqual(foo.my_attr , '''bar''')
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
_lowerCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(lowercase_ )}
_lowerCamelCase : List[str] = map_nested(lambda lowercase_ : x + 10 , lowercase_ , num_proc=lowercase_ , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __A ( lowerCamelCase__ ):
"""simple docstring"""
@require_tf
def __snake_case ( self):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
_lowerCamelCase : int = layers.Dense(2)
def gen_random_output():
_lowerCamelCase : Union[str, Any] = tf.random.uniform((1, 3))
return model(a__).numpy()
with temp_seed(42 , set_tensorflow=a__):
_lowerCamelCase : List[str] = gen_random_output()
with temp_seed(42 , set_tensorflow=a__):
_lowerCamelCase : Any = gen_random_output()
_lowerCamelCase : str = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@require_torch
def __snake_case ( self):
"""simple docstring"""
import torch
def gen_random_output():
_lowerCamelCase : Union[str, Any] = torch.nn.Linear(3 , 2)
_lowerCamelCase : Dict = torch.rand(1 , 3)
return model(a__).detach().numpy()
with temp_seed(42 , set_pytorch=a__):
_lowerCamelCase : Any = gen_random_output()
with temp_seed(42 , set_pytorch=a__):
_lowerCamelCase : Optional[int] = gen_random_output()
_lowerCamelCase : Union[str, Any] = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
def __snake_case ( self):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3)
with temp_seed(42):
_lowerCamelCase : Union[str, Any] = gen_random_output()
with temp_seed(42):
_lowerCamelCase : List[str] = gen_random_output()
_lowerCamelCase : str = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize('''input_data''' , [{}] )
def __UpperCAmelCase( lowercase_ ):
_lowerCamelCase : List[Any] = NestedDataStructure(lowercase_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def __UpperCAmelCase( lowercase_ , lowercase_ ):
_lowerCamelCase : int = NestedDataStructure(lowercase_ ).flatten()
assert output == expected_output
def __UpperCAmelCase( ):
_lowerCamelCase : Any = A(x=1 , y='''foobar''' )
_lowerCamelCase : Union[str, Any] = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(lowercase_ ) == expected_output
_lowerCamelCase : Optional[int] = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
_lowerCamelCase : Union[str, Any] = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(lowercase_ ) == expected_output
with pytest.raises(lowercase_ ):
asdict([1, A(x=10 , y='''foo''' )] )
def __UpperCAmelCase( lowercase_ ):
return text.split()
def __UpperCAmelCase( lowercase_ ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __UpperCAmelCase( ):
with Pool(2 ) as pool:
_lowerCamelCase : Tuple = list(iflatmap_unordered(lowercase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(lowercase_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_lowerCamelCase : Dict = list(iflatmap_unordered(lowercase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(lowercase_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_lowerCamelCase : str = []
for yield_time, content in iflatmap_unordered(
lowercase_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowercase_ )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(lowercase_ ) == 4
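# iflatmap_unordered, exercised above, maps a function over keyword-argument
# dicts on a multiprocessing pool and flattens the results, yielding from
# whichever task finishes first. A rough standalone sketch of that contract
# (not datasets' actual implementation, which streams items as they are
# generated rather than per finished task):
def _call_with_kwargs(func_and_kwargs):
    # module-level helper so the pool workers can pickle it
    func, kwargs = func_and_kwargs
    return list(func(**kwargs))

def iflatmap_unordered_sketch(pool, func, kwargs_iterable):
    tasks = [(func, kwargs) for kwargs in kwargs_iterable]
    # imap_unordered yields each task's items as soon as that task completes
    for items in pool.imap_unordered(_call_with_kwargs, tasks):
        yield from items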
| 613 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_)
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True})
__SCREAMING_SNAKE_CASE = Features({'''audio''': Audio()})
__SCREAMING_SNAKE_CASE = Features({'''labels''': ClassLabel})
__SCREAMING_SNAKE_CASE = "audio"
__SCREAMING_SNAKE_CASE = "labels"
def __lowerCamelCase ( self , lowercase ) -> List[str]:
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__UpperCamelCase = copy.deepcopy(self )
__UpperCamelCase = self.label_schema.copy()
__UpperCamelCase = features[self.label_column]
__UpperCamelCase = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
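# A short usage sketch for the task template above. Upstream, this class is
# datasets' AudioClassification template; the import path and the toy
# features below are assumptions for illustration:
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
template = AudioClassification(audio_column="audio", label_column="labels")
# align_with_features (the method defined above) copies the template and casts
# its label schema to the dataset's own ClassLabel
template = template.align_with_features(features)
print(template.column_mapping)  # {'audio': 'audio', 'labels': 'labels'}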
| 601 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 601 | 1 |
'''simple docstring'''
import requests
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> None:
UpperCAmelCase_ : List[str] = {'''Content-Type''': '''application/json'''}
UpperCAmelCase_ : Optional[Any] = requests.post(SCREAMING_SNAKE_CASE__, json={'''text''': message_body}, headers=SCREAMING_SNAKE_CASE__ )
if response.status_code != 200:
UpperCAmelCase_ : str = (
'''Request to slack returned an error '''
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 644 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 644 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ ( a_ ):
_SCREAMING_SNAKE_CASE : Tuple = ["image_processor", "tokenizer"]
_SCREAMING_SNAKE_CASE : Dict = "ViTImageProcessor"
_SCREAMING_SNAKE_CASE : Union[str, Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self : List[str] , snake_case_ : List[Any]=None , snake_case_ : Optional[int]=None , **snake_case_ : Tuple ):
__a : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
__a : Tuple = kwargs.pop('''feature_extractor''' )
__a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
def __call__(self : str , snake_case_ : Dict=None , snake_case_ : int=None , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None , **snake_case_ : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
__a : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None:
__a : Optional[Any] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
__a : Optional[Any] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None and images is not None:
__a : Union[str, Any] = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
__a : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
__a : Union[str, Any] = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def lowerCAmelCase (self : List[str] , *snake_case_ : List[str] , **snake_case_ : Optional[int] ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def lowerCAmelCase (self : Optional[Any] , *snake_case_ : str , **snake_case_ : List[Any] ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def lowerCAmelCase (self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
@property
def lowerCAmelCase (self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , )
return self.image_processor
| 521 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
for i in range(len(A_ ) - 1 , 0 , -1 ):
lowerCAmelCase__ : Optional[Any] = False
for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = unsorted[j - 1], unsorted[j]
lowerCAmelCase__ : Dict = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowerCAmelCase__ : Any = True
if not swapped:
break
return unsorted
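# A quick trace of the bidirectional pass above: each outer iteration bubbles
# the largest remaining value to the right, then the smallest to the left, and
# the early exit fires once a full sweep performs no swaps.
# >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
# [1, 2, 2, 4, 5]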
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 450 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase_ = '''docs/source/en/_toctree.yml'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = defaultdict(lowerCAmelCase__ )
_UpperCAmelCase = []
_UpperCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowerCAmelCase__ )
_UpperCAmelCase = new_doc_list
_UpperCAmelCase = [key for key, value in counts.items() if value > 1]
_UpperCAmelCase = []
for duplicate_key in duplicates:
_UpperCAmelCase = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
_UpperCAmelCase = sorted(lowerCAmelCase__,key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase__ ) > 1:
raise ValueError('The documentation table of content has two \'overview\' docs, which is not allowed.' )
overview_doc.extend(lowerCAmelCase__ )
# Sort
return overview_doc
def __lowerCamelCase ( SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
with open(lowerCAmelCase__,encoding='utf-8' ) as f:
_UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_UpperCAmelCase = content[api_idx]['sections']
# Then to the model doc
_UpperCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_UpperCAmelCase = api_doc[scheduler_idx]['sections']
_UpperCAmelCase = clean_doc_toc(lowerCAmelCase__ )
_UpperCAmelCase = False
if new_scheduler_doc != scheduler_doc:
_UpperCAmelCase = True
if overwrite:
_UpperCAmelCase = new_scheduler_doc
if diff:
if overwrite:
_UpperCAmelCase = api_doc
with open(lowerCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCAmelCase__,allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
with open(lowerCAmelCase__,encoding='utf-8' ) as f:
_UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_UpperCAmelCase = content[api_idx]['sections']
# Then to the model doc
_UpperCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_UpperCAmelCase = False
_UpperCAmelCase = api_doc[pipeline_idx]['sections']
_UpperCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_UpperCAmelCase = pipeline_doc['section']
_UpperCAmelCase = clean_doc_toc(lowerCAmelCase__ )
if overwrite:
_UpperCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCAmelCase__ )
# sort overall pipeline doc
_UpperCAmelCase = clean_doc_toc(lowerCAmelCase__ )
if new_pipeline_docs != pipeline_docs:
_UpperCAmelCase = True
if overwrite:
_UpperCAmelCase = new_pipeline_docs
if diff:
if overwrite:
_UpperCAmelCase = api_doc
with open(lowerCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCAmelCase__,allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
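# A toy illustration of the dedup-and-sort helper above (clean_doc_toc in the
# upstream script): given
#   [{'local': 'overview', 'title': 'Overview'},
#    {'local': 'ddpm', 'title': 'DDPM'},
#    {'local': 'ddim', 'title': 'DDIM'},
#    {'local': 'ddim', 'title': 'DDIM'}]
# the duplicate 'ddim' entry collapses to one (identical titles raise no
# error), the remaining entries sort by lower-cased title, and the 'Overview'
# entry is pinned first:
#   [{'local': 'overview', 'title': 'Overview'},
#    {'local': 'ddim', 'title': 'DDIM'},
#    {'local': 'ddpm', 'title': 'DDPM'}]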
| 721 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=4_00 , a__=True , a__=None , a__=True , a__=None , a__=True , a__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , a__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , a__=True , ):
_UpperCAmelCase = size if size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_convert_rgb
def __A ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __A ( self , a__=False , a__=False , a__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_UpperCAmelCase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_UpperCAmelCase = []
for i in range(self.batch_size ):
_UpperCAmelCase , _UpperCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
_UpperCAmelCase = [torch.from_numpy(a__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=a__ )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_resize' ) )
self.assertTrue(hasattr(a__ , 'size' ) )
self.assertTrue(hasattr(a__ , 'do_center_crop' ) )
self.assertTrue(hasattr(a__ , 'center_crop' ) )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'image_mean' ) )
self.assertTrue(hasattr(a__ , 'image_std' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def __A ( self ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=a__ )
_UpperCAmelCase = 3
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_resize' ) )
self.assertTrue(hasattr(a__ , 'size' ) )
self.assertTrue(hasattr(a__ , 'do_center_crop' ) )
self.assertTrue(hasattr(a__ , 'center_crop' ) )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'image_mean' ) )
self.assertTrue(hasattr(a__ , 'image_std' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 494 | 0 |
from __future__ import annotations
def __UpperCamelCase ( A , A , A ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def __UpperCamelCase ( A , A , A , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __UpperCamelCase ( A , A , A , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
A , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
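# Worked numeric checks for the three formulas above:
# simple interest: 10_000 principal * 0.0005 daily rate * 30 days -> 150.0
# compound interest earned: 1_000 * ((1 + 0.05) ** 2 - 1) -> 102.5
# APR: the same compound formula with rate nominal_annual_percentage_rate / 365
# applied over number_of_years * 365 daily periods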
if __name__ == "__main__":
import doctest
doctest.testmod()
| 415 |
import torch
from diffusers import DiffusionPipeline
class _A ( __UpperCamelCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
def __call__(self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
UpperCamelCase__ = 1
UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = scheduler_output - scheduler_output + torch.ones_like(SCREAMING_SNAKE_CASE_ )
return result
| 415 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Tuple = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __UpperCAmelCase ( __UpperCamelCase = 50_00 ):
__lowercase : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , __UpperCamelCase )]
for i, pentagonal_i in enumerate(__UpperCamelCase ):
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
__lowercase : int = pentagonal_nums[j]
__lowercase : List[Any] = pentagonal_i + pentagonal_j
__lowercase : List[str] = pentagonal_j - pentagonal_i
if is_pentagonal(__UpperCamelCase ) and is_pentagonal(__UpperCamelCase ):
return b
return -1
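# Sanity check for the inverse formula in the pentagonal test above: the n-th
# pentagonal number is P(n) = n * (3n - 1) / 2, so n = (1 + sqrt(1 + 24 * P)) / 6
# is a whole number exactly when P is pentagonal. For P(4) = 22:
#   (1 + (1 + 24 * 22) ** 0.5) / 6 == (1 + 23.0) / 6 == 4.0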
if __name__ == "__main__":
print(F"{solution() = }")
| 523 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="markuplm"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=2_56 , UpperCamelCase_=10_24 , UpperCamelCase_=2_16 , UpperCamelCase_=10_01 , UpperCamelCase_=32 , UpperCamelCase_=50 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ) -> List[str]:
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : Optional[int] = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : Tuple = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Tuple = hidden_act
__lowercase : Optional[Any] = intermediate_size
__lowercase : str = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Optional[int] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : Dict = initializer_range
__lowercase : Dict = layer_norm_eps
__lowercase : Any = position_embedding_type
__lowercase : int = use_cache
__lowercase : str = classifier_dropout
# additional properties
__lowercase : List[Any] = max_depth
__lowercase : List[Any] = max_xpath_tag_unit_embeddings
__lowercase : List[str] = max_xpath_subs_unit_embeddings
__lowercase : List[Any] = tag_pad_id
__lowercase : Tuple = subs_pad_id
__lowercase : Optional[int] = xpath_unit_hidden_size
| 523 | 1 |
import re
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
if len(re.findall('[ATCG]' , snake_case__ ) ) != len(snake_case__ ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
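# Usage sketch for the complement above: str.translate swaps each base for its
# Watson-Crick partner in one pass, e.g.
#   "ATCG".translate(str.maketrans("ATCG", "TAGC")) == "TAGC"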
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
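# Worked example for the prefix function above (the KMP failure table):
# prefix_result[i] is the length of the longest proper prefix of s[: i + 1]
# that is also its suffix, so
#   "aabaaab" -> [0, 1, 0, 1, 2, 2, 3]
# and the longest prefix-suffix length is 3 (the border "aab").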
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE = object()
def lowercase_ ( __A : str , __A : int ) -> Optional[Any]:
"""simple docstring"""
lowercase : str =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__A ) - len(__A ) + 1 ):
lowercase : Optional[int] =[x.match(__A ) for x, y in zip(__A , ks[i:] )]
if matches and all(__A ):
return True
return False
def lowercase_ ( __A : Optional[int] ) -> Dict:
"""simple docstring"""
def replace(__A : str , __A : Tuple ):
for rule, replacement in rules:
if _match(__A , __A ):
return replacement
return val
return replace
def lowercase_ ( ) -> Dict:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __A )),
(("transformer", "wte", "embedding"), P('''mp''' , __A )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__A , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__A , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase_ ( __A : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =_get_partition_rules()
lowercase : Optional[int] =_replacement_rules(__A )
lowercase : Dict ={k: _unmatched for k in flatten_dict(__A )}
lowercase : int ={k: replace(__A , __A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__A ) )
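# A small illustration of the matcher used above: each query pattern is
# compiled with a trailing "$" and slid across every window of the flattened
# parameter key, so the rule ("attention", "out_proj", "kernel") matches that
# key both on its own and inside a longer key such as
# ("transformer", "h", "0", "attention", "out_proj", "kernel").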
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( _lowercase : str , _lowercase : str , **_lowercase : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ = AutoConfig.from_pretrained(_lowercase , **_lowercase )
lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_config(_lowercase )
model.save_pretrained(_lowercase )
AutoTokenizer.from_pretrained(_lowercase ).save_pretrained(_lowercase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 552 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def UpperCAmelCase ( _lowercase : int ) -> datetime:
"""simple docstring"""
lowerCAmelCase_ = year % 1_9
lowerCAmelCase_ = year % 4
lowerCAmelCase_ = year % 7
lowerCAmelCase_ = math.floor(year / 1_0_0 )
lowerCAmelCase_ = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
lowerCAmelCase_ = leap_day_inhibits / 4
lowerCAmelCase_ = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
lowerCAmelCase_ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
lowerCAmelCase_ = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
lowerCAmelCase_ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_lowercase , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_lowercase , 4 , 1_8 )
else:
return datetime(_lowercase , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 552 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
_a = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def UpperCamelCase__ ( self : str ):
_a = self.dummy_uncond_unet
_a = ScoreSdeVeScheduler()
_a = ScoreSdeVePipeline(unet=__a , scheduler=__a )
sde_ve.to(__a )
sde_ve.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=__a ).images
_a = torch.manual_seed(0 )
_a = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=__a , return_dict=__a )[
0
]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : int ):
_a = "google/ncsnpp-church-256"
_a = UNetaDModel.from_pretrained(__a )
_a = ScoreSdeVeScheduler.from_pretrained(__a )
_a = ScoreSdeVePipeline(unet=__a , scheduler=__a )
sde_ve.to(__a )
sde_ve.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=__a ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_a = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 716 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : str , lowercase : int , lowercase : Any , lowercase : Any ) -> Any:
# Initialise PyTorch model
_a = FunnelConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
_a = FunnelBaseModel(lowercase ) if base_model else FunnelModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
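# An illustrative invocation of the converter above (the script filename and
# the paths are placeholders, not confirmed by this snippet):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./funnel/model.ckpt \
#     --config_file ./funnel/config.json \
#     --pytorch_dump_path ./funnel-pytorch \
#     --base_model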
| 521 | 0 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
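# Hedged follow-up sketch: loading the converted checkpoint for translation.
# The local folder and tokenizer repo name are illustrative, not from the source:
# from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
# model = M2M100ForConditionalGeneration.from_pretrained("./m2m100_pytorch")
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")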
| 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
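# Behavior sketch (assumes the standard diffusers `requires_backends` helper):
# the placeholder exists so imports succeed without the optional dependencies,
# but any use raises an ImportError naming the missing backends, e.g.:
#   SpectrogramDiffusionPipeline()  # -> ImportError mentioning "note_seq"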
| 442 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
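# Hedged usage sketch (dataset and column names illustrative): attaching the
# template lets the deprecated-era `Dataset.prepare_for_task` rename columns to
# the canonical "text"/"summary" pair expected by summarization pipelines.
# from datasets import load_dataset
# ds = load_dataset("cnn_dailymail", "3.0.0", split="train")
# ds = ds.prepare_for_task(Summarization(text_column="article", summary_column="highlights"))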
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 659 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
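# The adapter above implements the low-rank update idea of LoRA: the frozen base
# layer computes W x while the two bias-free linears compute B(A x) with rank r,
# so the effective output is W x + B A x and only A and B receive gradients.
# A quick shape check (sizes illustrative):
# layer = LoRALayer(nn.Linear(64, 32), rank=4)
# assert layer(torch.randn(2, 64)).shape == (2, 32)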
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Tokenizer is shared by all tests below
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        # free GPU memory and cache between tests to avoid unexpected behaviors
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with a `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting to float
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting to half
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
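# Minimal hedged sketch of the core scenario these tests cover: loading a causal
# LM in 4-bit via bitsandbytes and generating (model name illustrative; needs a GPU):
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tok = AutoTokenizer.from_pretrained("facebook/opt-350m")
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True, device_map="auto")
# out = model.generate(**tok("Hello my name is", return_tensors="pt").to(0), max_new_tokens=10)
# print(tok.decode(out[0], skip_special_tokens=True))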
| 577 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
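# Hedged usage sketch (the image path is illustrative):
# from PIL import Image
# tool = DocumentQuestionAnsweringTool()
# answer = tool(Image.open("invoice.png"), "What is the invoice total?")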
| 240 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` near the starting point ``a`` via Newton-Raphson.

    Iterates x_{n+1} = x_n - f(x_n) / f'(x_n) until |f(x_n)| < precision.
    """
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find the value of e (the root of log(x) - 1)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 233 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Return a longest non-decreasing subsequence of ``array``.

    >>> longest_subsequence([1, 2, 3])
    [1, 2, 3]
    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
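# Hand-traced example of the branch logic above: for [3, 1, 2] the scan finds
# 1 < pivot 3, recurses on the tail filtered to elements >= 1 (giving [1, 2])
# and keeps it, while the pivot-anchored candidate [3] is shorter, so:
#   longest_subsequence([3, 1, 2]) == [1, 2]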
| 233 | 1 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
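# Illustrative invocation; the script filename and file names are placeholders:
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions \
#       --gold_data_path gold.titles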
| 105 |
def has_unique_chars(input_str: str) -> bool:
    """
    Check, via an integer bitmap over code points, that no character repeats.

    >>> has_unique_chars("abcde")
    True
    >>> has_unique_chars("hello")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 710 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 267 | 0 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
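    # Worked example of the smaller case (hand-checked): the first pair of
    # consecutive integers each having two distinct prime factors is 14 = 2 * 7
    # and 15 = 3 * 5, so solution(2) == 14.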
| 414 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
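# Hedged usage sketch: the defaults above describe the CvT-13 layout, so a bare
# config can instantiate the model directly (class names are the real transformers ones):
# from transformers import CvtModel
# model = CvtModel(CvtConfig())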
| 414 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
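# Quick illustration of the helpers above (values checked against the regexes):
#   camelcase_to_snakecase("SomeDatasetName")   -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name") -> "SomeDatasetName"
#   filename_prefix_for_split("Snli", "train")  -> "snli-train"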
| 610 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
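# Hedged usage sketch: once registered with fsspec, these classes let URL
# chaining expose a compressed file as a one-file archive (URL illustrative):
# import fsspec
# with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
#     data = f.read()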
| 610 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 236 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 236 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes up to and including ``num``.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 17 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }

        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input

        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 17 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
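# Worked example of how MAPPING is applied below: the fairseq key
# "encoder.layers.3.self_attn.k_proj.weight" matches "self_attn.k_proj", the "*"
# in the mapped key is filled with the layer index, and the tensor is routed to
# "encoder.layers.3.attention.k_proj" with weight_type "weight".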
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : List[str] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A_ : List[Any] = value
elif weight_type == "weight_g":
A_ : Optional[Any] = value
elif weight_type == "weight_v":
A_ : Any = value
elif weight_type == "bias":
A_ : Any = value
else:
A_ : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def load_adapter( full_name , value , adapter , unused_weights ):
    '''simple docstring'''
    name = full_name.split("""adaptor.""" )[-1]
    items = name.split(""".""" )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
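# Reusing the embedding matrix as a bias-free linear layer is the standard weight-tying trick:
# the output projection then shares its parameters with the input embedding table.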
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""" )[:-1] ),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = 'mbart50'
    config["""feature_extractor_type"""] = 'wav2vec2'
    config["""decoder_start_token_id"""] = tokenizer.eos_token_id
    config["""forced_bos_token_id"""] = 25_00_04
    config["""forced_eos_token_id"""] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
    )
 | 667 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ) -> int:
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ) -> Optional[Any]:
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ) -> Tuple:
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ) -> Any:
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> str:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Optional[int]:
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=3_7 )
    def test_config( self ) -> List[Any]:
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Any:
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_openai_gpt( self ) -> str:
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 705 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_2 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , projection_dim=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , bos_token_id=0 , scope=None , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> str:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ) -> Dict:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ) -> List[str]:
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest ( TFModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=3_7 )
    def test_config( self ) -> Dict:
        self.config_tester.run_common_tests()
    def test_model( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ) -> Any:
        pass
    def test_training_gradient_checkpointing( self ) -> Union[str, Any]:
        pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> Optional[Any]:
        pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def test_save_load_fast_init_from_base( self ) -> Union[str, Any]:
        pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def test_save_load_fast_init_to_base( self ) -> List[Any]:
        pass
@slow
    def test_model_from_pretrained( self ) -> Any:
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ) -> Tuple:
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 148 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self: Optional[int] ,vae: AutoencoderKL ,text_encoder: CLIPTextModel ,tokenizer: CLIPTokenizer ,unet: UNetaDConditionModel ,scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,safety_checker: StableDiffusionSafetyChecker ,feature_extractor: CLIPImageProcessor ,):
        super().__init__()
        self.register_modules(
            vae=vae ,text_encoder=text_encoder ,tokenizer=tokenizer ,unet=unet ,scheduler=scheduler ,safety_checker=safety_checker ,feature_extractor=feature_extractor ,)
def snake_case ( self: Any ,a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
    def disable_attention_slicing( self: Dict ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self: int ,prompt: Union[str, List[str]] ,height: int = 512 ,width: int = 512 ,num_inference_steps: int = 50 ,guidance_scale: float = 7.5 ,negative_prompt: Optional[Union[str, List[str]]] = None ,num_images_per_prompt: Optional[int] = 1 ,eta: float = 0.0 ,generator: Optional[torch.Generator] = None ,latents: Optional[torch.FloatTensor] = None ,output_type: Optional[str] = "pil" ,return_dict: bool = True ,callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,callback_steps: int = 1 ,text_embeddings: Optional[torch.FloatTensor] = None ,**kwargs: int ,):
        if isinstance(prompt ,str ):
            batch_size = 1
        elif isinstance(prompt ,list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps ,int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt ,padding='max_length' ,max_length=self.tokenizer.model_max_length ,return_tensors='pt' ,)
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 ,num_images_per_prompt ,1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt ,seq_len ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['']
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt ,str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens ,padding='max_length' ,max_length=max_length ,truncation=True ,return_tensors='pt' ,)
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size ,num_images_per_prompt ,1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt ,seq_len ,-1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference ,generator=generator ,device='cpu' ,dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape ,generator=generator ,device='cpu' ,dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference ,generator=generator ,device=self.device ,dtype=latents_dtype )
                latents = torch.randn(latents_shape ,generator=generator ,device=self.device ,dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx ,0 )
        dy = max(-dy ,0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input ,t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input ,t ,encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
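                # the batched forward pass above stacked [unconditional, text] inputs, so the two
                # chunks below are eps_uncond and eps_text; classifier-free guidance combines them as
                # eps = eps_uncond + guidance_scale * (eps_text - eps_uncond), and guidance_scale == 1
                # recovers the unguided prediction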
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred ,t ,latents ,**extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i ,t ,latents )
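        # 1 / 0.18215 undoes the fixed latent scaling factor used by the Stable Diffusion VAE
        # (0.18215 comes from the original CompVis training configuration)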
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 ,1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) ,return_tensors='pt' ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image ,nsfw_content_detected=has_nsfw_concept )
| 396 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer , key , value , full_name , weight_type) -> List[str]:
    '''simple docstring'''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model , hf_model , is_finetuned) -> str:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm) -> int:
    '''simple docstring'''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True) -> Tuple:
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            with open(vocab_path , 'w' , encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices , vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
 | 125 | 0 |
"""simple docstring"""
import math
def lowercase_ ( _lowerCamelCase: int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
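# illustrative checks: is_prime(29) -> True (29 = 6*5 - 1), is_prime(35) -> False (35 = 5 * 7)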
def lowercase_ ( _lowerCamelCase: int = 10001 ) -> int:
'''simple docstring'''
try:
__lowerCamelCase : Optional[int] = int(_lowerCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
__lowerCamelCase : list[int] = []
__lowerCamelCase : Tuple = 2
while len(_lowerCamelCase ) < nth:
if is_prime(_lowerCamelCase ):
primes.append(_lowerCamelCase )
num += 1
else:
num += 1
return primes[len(_lowerCamelCase ) - 1]
if __name__ == "__main__":
    print(F"""{solution() = }""")
 | 702 |
"""simple docstring"""
from functools import lru_cache
def lowercase_ ( _lowerCamelCase: int ) -> set:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = 2
__lowerCamelCase : Tuple = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_lowerCamelCase )
if n > 1:
factors.add(_lowerCamelCase )
return factors
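# illustrative check: unique_prime_factors(644) -> {2, 7, 23}, since 644 = 2**2 * 7 * 23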
@lru_cache
def lowercase_ ( _lowerCamelCase: int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(_lowerCamelCase ) )
def lowercase_ ( _lowerCamelCase: list ) -> bool:
'''simple docstring'''
return len(set(_lowerCamelCase ) ) in (0, 1)
def lowercase_ ( _lowerCamelCase: int ) -> list:
'''simple docstring'''
__lowerCamelCase : str = 2
while True:
# Increment each value of a generated range
__lowerCamelCase : int = [base + i for i in range(_lowerCamelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
__lowerCamelCase : Dict = [upf_len(_lowerCamelCase ) for x in group]
checker.append(_lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def lowercase_ ( _lowerCamelCase: int = 4 ) -> int:
'''simple docstring'''
__lowerCamelCase : Any = run(_lowerCamelCase )
return results[0] if len(_lowerCamelCase ) else None
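# Project Euler problem 47: solution(4) searches for the first of four consecutive integers
# that each have four distinct prime factors (the published answer is 134043)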
if __name__ == "__main__":
    print(solution())
 | 366 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters( state_dict ):
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
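# sums all parameter values as a cheap fingerprint for the conversion parity check below;
# `encoder.embeddings` tensors are excluded (presumably because they are not carried over 1:1)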
def upgrade_state_dict( state_dict ,codebook_state_dict ):
    """simple docstring"""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' ,'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' )
        key = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' ,'flava.image_model' )
        key = key.replace('text_encoder.module' ,'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' ,'flava.multimodal_model' )
        key = key.replace('text_projection' ,'flava.text_projection' )
        key = key.replace('image_projection' ,'flava.image_projection' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path ,codebook_path ,pytorch_dump_folder_path ,config_path=None ):
    """simple docstring"""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path ,None ,save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path ,map_location='cpu' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path ,map_location='cpu' )
    hf_state_dict = upgrade_state_dict(state_dict ,codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count ,state_dict_count ,atol=1e-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 605 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path ,bert_config_file ,pytorch_dump_path ):
    """simple docstring"""
    config = BertConfig.from_json_file(bert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 605 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
"""simple docstring"""
    def __init__( self , *, # begin keyword-only arguments
    bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ) -> List[str]:
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
def __eq__( self , UpperCAmelCase__ ) -> int:
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase__ ) -> List[str]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> Optional[int]:
return len(self.symbols )
def __contains__( self , UpperCAmelCase__ ) -> Any:
return sym in self.indices
@classmethod
def _lowerCamelCase ( cls , UpperCAmelCase__ ) -> str:
_A : Optional[Any] = cls()
d.add_from_file(UpperCAmelCase__ )
return d
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__=1 , UpperCAmelCase__=False ) -> str:
if word in self.indices and not overwrite:
_A : List[Any] = self.indices[word]
_A : List[Any] = self.count[idx] + n
return idx
else:
_A : Any = len(self.symbols )
_A : Dict = idx
self.symbols.append(UpperCAmelCase__ )
self.count.append(UpperCAmelCase__ )
return idx
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> List[str]:
return 0
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Optional[Any]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(UpperCAmelCase__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(UpperCAmelCase__ ) )
return
_A : str = f.readlines()
_A : int = self._load_meta(UpperCAmelCase__ )
for line in lines[indices_start_line:]:
try:
_A , _A : Dict = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
_A : List[str] = True
_A , _A : List[Any] = line.rsplit(''' ''' , 1 )
else:
_A : int = False
_A : str = int(UpperCAmelCase__ )
_A : Any = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(UpperCAmelCase__ ) )
self.add_symbol(UpperCAmelCase__ , n=UpperCAmelCase__ , overwrite=UpperCAmelCase__ )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def rewrite_dict_keys( d ):
    """simple docstring"""
    da = dict((re.sub(R'''@@$''' , '''''' , k), v) if k.endswith('''@@''') else (re.sub(R'''$''' , '''</w>''' , k), v) for k, v in d.items())
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
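# e.g. a fairseq BPE vocab entry "hug@@" becomes "hug" (continuation piece), while a word-final
# entry "ging" becomes "ging</w>"; the four special tokens are restored to their original form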
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path , exist_ok=True)
    print(f"""Writing results to {pytorch_dump_folder_path}""")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file , map_location='''cpu''')
    args = chkpt['''cfg''']['''model''']
# dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''')
    if not os.path.isfile(dict_file):
        raise ValueError(f"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''])
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file , '''w''' , encoding='''utf-8''') as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent))
# merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''])
    shutil.copyfile(bpecodes_file , merges_file)
# model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''')
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''') as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent))
# tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f"""Generating {biogpt_tokenizer_config_file}""")
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(lowerCAmelCase , ensure_ascii=lowerCAmelCase , indent=lowerCAmelCase))
# model
_A : Tuple = chkpt['''model''']
# remove unneeded keys
_A : Union[str, Any] = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase , lowerCAmelCase)
_A : Optional[int] = list(model_state_dict.keys())
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight'''):
_A : Union[str, Any] = model_state_dict.pop(lowerCAmelCase)
else:
_A : str = model_state_dict.pop(lowerCAmelCase)
_A : Optional[int] = BioGptConfig.from_pretrained(lowerCAmelCase)
_A : List[Any] = BioGptForCausalLM(lowerCAmelCase)
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase)
# save
_A : Optional[int] = os.path.join(lowerCAmelCase , lowerCAmelCase)
print(f"""Generating {pytorch_weights_dump_path}""")
torch.save(lowerCAmelCase , lowerCAmelCase)
print('''Conversion is done!''')
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
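# Example invocation (hypothetical paths and script name -- the argparse
# definition above is the authoritative interface):
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# The checkpoint directory must contain checkpoint.pt, dict.txt and bpecodes,
# as validated at the top of convert_biogpt_checkpoint_to_pytorch().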
| 417 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
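    # Note: the allocator must be selected before `import jax` below -- once the
    # XLA backend initializes, the choice is fixed for the lifetime of the process.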
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
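# A quick, self-contained illustration of the masking convention used above:
# positions holding pad_token_id (1 for BlenderbotSmall) get mask 0, all other
# positions get mask 1. The ids below are made up for demonstration only.
_demo_ids = np.array([[5, 8, 2, 1, 1]])
assert np.array_equal(np.where(_demo_ids != 1, 1, 0), np.array([[1, 1, 1, 0, 0]]))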
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
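def _shift_right_sketch(ids: np.ndarray, start_id: int) -> np.ndarray:
    """Rough NumPy re-implementation of the right shift exercised above (an
    assumption for illustration, not the library function): the trailing token
    of each row is dropped and `start_id` is written into column 0, which is
    exactly why n_pad_after == n_pad_before - 1 in test_shift_tokens_right."""
    shifted = np.roll(ids, 1, axis=-1)
    shifted[:, 0] = start_id
    return shifted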
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : List[str] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_A : Any = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
_A : Optional[int] = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A : int = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self):
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : Any = model_class(UpperCAmelCase__ )
_A : Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_A : str = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
_A : int = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A : Any = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 417 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved Pseudo numerical methods for diffusion models (iPNDM), a fourth-order
    linear multistep scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
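# Minimal usage sketch for the scheduler above (the denoising `model` here is
# hypothetical; any callable mapping (sample, t) -> model_output works):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample
#
# Note how step() accumulates up to four past outputs in self.ets, switching
# from an Euler-like update to the 4th-order Adams-Bashforth formula
# (55, -59, 37, -9)/24 once the history is warm.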
| 593 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
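# `make_divisible` (imported above) is the MobileNet-style channel rounding rule.
# A sketch of the usual implementation, renamed to avoid shadowing the imported
# helper (an assumption -- the transformers source is authoritative):
def _make_divisible_sketch(value, divisor=8, min_value=None):
    min_value = min_value if min_value is not None else divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # never round down below 90% of the input value
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


# e.g. _make_divisible_sketch(512 * 0.25, divisor=8) == 128, which is how the
# tester below derives `last_hidden_size` for width_multiplier=0.25.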
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    fx_compatible = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 593 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
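# Self-contained illustration of the in_proj split performed in read_in_q_k_v:
# PyTorch's nn.MultiheadAttention stores a single (3*d, d) in_proj matrix whose
# consecutive row blocks are the query, key and value projections (d = 256 here).
def _demo_qkv_split(d: int = 256) -> None:
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]  # query rows
    k_w = in_proj_weight[d : 2 * d, :]  # key rows
    v_w = in_proj_weight[-d:, :]  # value rows
    assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)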
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the leading "conditional_detr" prefix before re-nesting under the base model
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
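# Example invocation (hypothetical output path; --model_name must match a
# torch.hub entry point in the DeppMeng/ConditionalDETR repo, e.g. the default
# conditional_detr_resnet50):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path /path/to/output_dir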
| 716 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
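# Shape of the ProteinNet-style input expected by from_proteinnet_string
# (values below are made up for illustration; TERTIARY holds one line per
# x/y/z axis with N/CA/C columns per residue in picometers, and MASK marks
# resolved residues with '+'):
#
#   [PRIMARY]
#   AG
#   [TERTIARY]
#   0.0 1.0 2.0 3.0 4.0 5.0
#   0.0 1.0 2.0 3.0 4.0 5.0
#   0.0 1.0 2.0 3.0 4.0 5.0
#   [MASK]
#   ++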
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
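# For reference, the fixed-width record emitted by the f-string in to_pdb looks
# like this (coordinates are illustrative only):
#
#   ATOM      1  N   MET A   1      10.000  12.500   7.250  1.00  0.00           N
#
# Column positions are mandated by the PDB format, which is why every field
# above is padded to an exact width.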
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes the atom mask implied by the amino-acid sequence alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 387 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node: a treap is a binary search tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; all values in `left` must not exceed values in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting at it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value by splitting out the middle range."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands of the form `+value` (insert) or `-value` (erase)."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, program prints treap"""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")
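# Example (non-interactive) use of the helpers above:
#
#   root = interact_treap(None, "+1 +3 +5 +17 +19 +2 +16 +4 +0")
#   inorder(root)   # prints the keys in sorted order: 0,1,2,3,4,5,16,17,19,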
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 489 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
a_ = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
a_ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
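# The XPath targets the three "maincounter-number" blocks on the page, which
# appear in cases/deaths/recovered order -- matching the covid_data fields.
# A slightly defensive variant (an assumption, not part of the original):
#
#   stats = html.fromstring(requests.get(url).content).xpath(xpath_str)
#   if len(stats) != 3:
#       raise RuntimeError("unexpected page layout")
#   print(fmt.format(*stats))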
| 177 | 0 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that the two forward passes produce the same image
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float16, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
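
# Usage sketch (assuming the class above): `attribute_map` lets the generic
# config names resolve to the decoder-specific fields.
#
#     config = Speech2Text2Config(d_model=512, decoder_attention_heads=8)
#     config.hidden_size          # -> 512, via the "d_model" mapping
#     config.num_attention_heads  # -> 8, via the "decoder_attention_heads" mapping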
| 489 | 0 |
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans one section of the table of content of the documentation: removes
    duplicate entries, hoists the "Overview" page to the front and sorts the
    rest alphabetically by title.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
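
# Worked example (hypothetical input, assuming the function above): duplicate
# "local" entries collapse into one, and the Overview page is hoisted first.
#
#     clean_doc_toc([
#         {"local": "foo", "title": "Foo"},
#         {"local": "overview", "title": "Overview"},
#         {"local": "foo", "title": "Foo"},
#     ])
#     # -> [{"local": "overview", "title": "Overview"}, {"local": "foo", "title": "Foo"}]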
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 17 |
import random


def partition(a, left_index, right_index):
    """Lomuto-style partition around ``a[left_index]``; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Quicksort with a random pivot; sorts a[left:right] in place (right is exclusive)."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
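
# Usage sketch (non-interactive, assuming the functions above): `right` is an
# exclusive bound, so the whole list is sorted by passing `len(items)`.
#
#     items = [9, 1, 5, 3]
#     quick_sort_random(items, 0, len(items))
#     items   # -> [1, 3, 5, 9]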
| 634 | 0 |
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
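
# Shape note for `read_in_q_k_v` above: the MSN checkpoint stores the fused
# attention projection as one (3 * hidden_size, hidden_size) matrix, and the
# three slices peel it apart into query, key and value blocks of
# (hidden_size, hidden_size) each, matching the separate q/k/v projections
# that the HF ViT implementation expects.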
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 243 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader

from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in human-readable and tokenized form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            # Smoothed loss: (1 - eps) * NLL + eps * uniform penalty over the vocab,
            # computed from the log-probs by `label_smoothed_nll_loss`.
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
| 243 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCamelCase__ :Optional[int] = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class A( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Any = None
_UpperCamelCase :Optional[int] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_UpperCamelCase :Union[str, Any] = os.path.abspath('''examples''' )
for item in os.listdir(SCREAMING_SNAKE_CASE__ ):
if item not in EXCLUDE_EXAMPLES:
_UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE__ , feature_script=SCREAMING_SNAKE_CASE__ , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_UpperCamelCase :List[str] = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[Any] = '''\n'''.join(SCREAMING_SNAKE_CASE__ )
if special_strings is not None:
for string in special_strings:
_UpperCamelCase :List[Any] = diff.replace(SCREAMING_SNAKE_CASE__ , '''''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '''''' )
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
self.one_complete_example('''complete_nlp_example.py''' , SCREAMING_SNAKE_CASE__ )
self.one_complete_example('''complete_nlp_example.py''' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :int = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_UpperCamelCase :List[str] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.one_complete_example('''complete_cv_example.py''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class A( lowerCamelCase__ ):
"""simple docstring"""
A = False
@classmethod
def _UpperCamelCase( cls ) -> List[Any]:
"""simple docstring"""
super().setUpClass()
_UpperCamelCase :Optional[Any] = tempfile.mkdtemp()
_UpperCamelCase :Optional[Any] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase :Optional[int] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _UpperCamelCase( cls ) -> Tuple:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Any = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :List[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
_UpperCamelCase :Any = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Any = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
_UpperCamelCase :Any = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
self.assertNotIn('''epoch 0:''' , SCREAMING_SNAKE_CASE__ )
self.assertIn('''epoch 1:''' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Dict = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
_UpperCamelCase :str = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
if torch.cuda.is_available():
_UpperCamelCase :Tuple = torch.cuda.device_count()
else:
_UpperCamelCase :str = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , SCREAMING_SNAKE_CASE__ )
self.assertIn('''epoch 1:''' , SCREAMING_SNAKE_CASE__ )
else:
self.assertIn('''epoch 0:''' , SCREAMING_SNAKE_CASE__ )
self.assertIn('''epoch 1:''' , SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Optional[int] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_UpperCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Dict = re.findall('''({.+})''' , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
_UpperCamelCase :Tuple = ast.literal_eval(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
_UpperCamelCase :Optional[int] = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , '''tracking''' ) ) )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 355 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase__ :Dict = logging.get_logger(__name__)
UpperCamelCase__ :int = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class A( lowerCamelCase__ ):
"""simple docstring"""
A = "bart"
A = ["past_key_values"]
A = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , SCREAMING_SNAKE_CASE__=5_02_65 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=2 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = vocab_size
_UpperCamelCase :Union[str, Any] = max_position_embeddings
_UpperCamelCase :int = d_model
_UpperCamelCase :Any = encoder_ffn_dim
_UpperCamelCase :str = encoder_layers
_UpperCamelCase :str = encoder_attention_heads
_UpperCamelCase :Optional[int] = decoder_ffn_dim
_UpperCamelCase :Any = decoder_layers
_UpperCamelCase :Dict = decoder_attention_heads
_UpperCamelCase :str = dropout
_UpperCamelCase :Tuple = attention_dropout
_UpperCamelCase :Optional[Any] = activation_dropout
_UpperCamelCase :Optional[Any] = activation_function
_UpperCamelCase :int = init_std
_UpperCamelCase :Any = encoder_layerdrop
_UpperCamelCase :List[Any] = decoder_layerdrop
_UpperCamelCase :Union[str, Any] = classifier_dropout
_UpperCamelCase :List[Any] = use_cache
_UpperCamelCase :str = encoder_layers
_UpperCamelCase :int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Any = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
class A( lowerCamelCase__ ):
"""simple docstring"""
@property
def _UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCamelCase :List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase :Optional[Any] = {0: '''batch'''}
_UpperCamelCase :int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
_UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_UpperCamelCase :Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase , _UpperCamelCase :List[str] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :int = {0: '''batch''', 2: '''past_sequence + sequence'''}
_UpperCamelCase :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_UpperCamelCase :List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_UpperCamelCase :Optional[int] = super().outputs
else:
_UpperCamelCase :List[str] = super(SCREAMING_SNAKE_CASE__ , self ).outputs
if self.use_past:
_UpperCamelCase , _UpperCamelCase :int = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
_UpperCamelCase :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCamelCase :str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Generate decoder inputs
_UpperCamelCase :List[Any] = seq_length if not self.use_past else 1
_UpperCamelCase :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Union[str, Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
_UpperCamelCase :str = dict(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_UpperCamelCase , _UpperCamelCase :Any = common_inputs['''input_ids'''].shape
_UpperCamelCase :Dict = common_inputs['''decoder_input_ids'''].shape[1]
_UpperCamelCase , _UpperCamelCase :Any = self.num_attention_heads
_UpperCamelCase :Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCamelCase :Tuple = decoder_seq_length + 3
_UpperCamelCase :Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_UpperCamelCase :Optional[int] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , dim=1 )
_UpperCamelCase :Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_UpperCamelCase , _UpperCamelCase :int = self.num_layers
_UpperCamelCase :Any = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[int] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) - min_num_layers
_UpperCamelCase :int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
_UpperCamelCase :Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] =logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ : List[str] ={
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """retribert"""
def __init__( self , _lowercase=30522 , _lowercase=768 , _lowercase=8 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=True , _lowercase=128 , _lowercase=0 , **_lowercase , ) -> Optional[int]:
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[int] = projection_dim
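A quick usage sketch for the configuration above (assumes a transformers install that still ships the deprecated RetriBERT classes):

from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=256)
print(config.model_type)      # "retribert"
print(config.projection_dim)  # 256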
""" SEW-D model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
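The `inputs_to_logits_ratio` property simply multiplies the convolution strides, i.e. how many raw audio samples map to one encoder frame. A standalone check with the default strides from the config above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults from SEWDConfig above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame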
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""RoBERTa configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a RoBERTa model."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
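Sketch of inspecting the dynamic axes declared by the ONNX config above (the RobertaOnnxConfig import path is an assumption matching recent transformers layouts):

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig(), task="sequence-classification")
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}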
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(range(len(_A ) ) )
__SCREAMING_SNAKE_CASE = [v / w for v, w in zip(_A , _A )]
index.sort(key=lambda lowerCAmelCase_ : ratio[i] , reverse=_A )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [0] * len(_A )
for i in index:
if weight[i] <= capacity:
__SCREAMING_SNAKE_CASE = 1
max_value += value[i]
capacity -= weight[i]
else:
__SCREAMING_SNAKE_CASE = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
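A worked example of the function above (classic textbook numbers, not from the original file): values [60, 100, 120] with weights [10, 20, 30] under capacity 50 yield 240 by taking the first two items whole and two thirds of the last:

max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]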
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
a__ : str = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
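# --- Shape sketch for DataCollatorForMultipleChoice (illustrative, not part of
# the original script): each incoming feature holds 4 choice encodings, which
# are flattened to a (batch_size * 4)-long list, padded in one tokenizer.pad
# call, then viewed back to (batch_size, 4, padded_seq_len):
#
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator(features)
#   batch["input_ids"].shape  ->  (batch_size, 4, padded_seq_len)
#   batch["labels"].shape     ->  (batch_size,)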
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
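    # Worked shape example for preprocess_function (illustrative): for N input
    # examples, first_sentences/second_sentences are flattened to 4*N strings
    # before the tokenizer call, so tokenized_examples["input_ids"] has length
    # 4*N; the final dict comprehension regroups it so each output row is a
    # list of 4 encodings, one per candidate ending.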
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
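# Typical invocation of this script from the command line (illustrative flags;
# any standard TrainingArguments options apply):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --output_dir /tmp/swag_output \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3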
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
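Outside the test harness, the processor under test can be exercised directly; a minimal sketch (assumes transformers, Pillow, and torch installed; the all-zero image is just a stand-in):

import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
image = Image.fromarray(np.zeros((40, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 30, 30])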
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
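The _LazyModule indirection above keeps importing the package cheap; torch-backed symbols only materialize on first attribute access. Sketch (assumes transformers installed with torch; the printed model_type matches the upstream config):

import transformers

# Resolving the attribute triggers the lazy import of the submodule.
config_cls = transformers.TimeSeriesTransformerConfig
print(config_cls.model_type)  # expected "time_series_transformer"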
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""Project Euler Problem 47: https://projecteuler.net/problem=47

Find the first four consecutive integers to have four distinct prime factors each.
"""

from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements in an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Run the core process to find the problem solution."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
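Sanity checks from the problem statement itself: the first two consecutive integers with two distinct prime factors each are 14 (= 2 x 7) and 15 (= 3 x 5), and the first such triple starts at 644:

print(run(2))      # [14, 15]
print(run(3))      # [644, 645, 646]
print(solution())  # first member of the first qualifying run of four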
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        # Keep the backend normalizer in sync with the arguments passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
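
# Illustration (not from the original file): a minimal, dependency-free sketch of
# the "[CLS] A [SEP] B [SEP]" layout that build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences produce for a BERT-style sequence pair.
# The token ids below are made up for the example.
_cls_id, _sep_id = 101, 102
_seq_a, _seq_b = [7, 8], [9]
_input_ids = [_cls_id] + _seq_a + [_sep_id] + _seq_b + [_sep_id]
_token_type_ids = [0] * (len(_seq_a) + 2) + [1] * (len(_seq_b) + 1)
assert _input_ids == [101, 7, 8, 102, 9, 102]
assert _token_type_ids == [0, 0, 0, 0, 1, 1]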
| 77 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file stored in a Hugging Face dataset repository."""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
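
if __name__ == "__main__":
    # Usage sketch (repo and file names are hypothetical). The helper only
    # builds a URL string, so this makes no network calls.
    print(hf_hub_url("user/my-dataset", "data/train.csv", revision="main"))
    # e.g. https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv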
| 56 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
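
# Illustration (not from the original file): BARThez follows the CamemBERT/
# RoBERTa special-token layout, "<s> A </s>" for one sequence and
# "<s> A </s></s> B </s>" for a pair, with all-zero token type ids.
# The ids below (cls=0, sep=2, A=[5, 6], B=[7]) are hypothetical.
_cls, _sep = [0], [2]
_a, _b = [5, 6], [7]
_pair = _cls + _a + _sep + _sep + _b + _sep
assert _pair == [0, 5, 6, 2, 2, 7, 2]
assert len(_pair) * [0] == [0] * 7  # token type ids are all zeros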
| 703 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
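
if is_torch_available():
    # Illustration (not part of the original test suite): the helper above
    # derives attention masks from pad positions. With pad_token_id == 1,
    # every position holding the pad id is masked out.
    _demo_ids = torch.tensor([[5, 7, 1, 1]])
    assert _demo_ids.ne(1).tolist() == [[True, True, False, False]]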
class MaMaaaModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_class.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
if not self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = inputs["input_ids"]
del inputs["input_ids"]
else:
__SCREAMING_SNAKE_CASE = inputs["input_ids"]
__SCREAMING_SNAKE_CASE = inputs.get("decoder_input_ids" , UpperCAmelCase__ )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.get_input_embeddings()
if not self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = wte(UpperCAmelCase__ )
with torch.no_grad():
model(**UpperCAmelCase__ )[0]
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(UpperCAmelCase__ ).eval().to(UpperCAmelCase__ )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
model.generate(num_beams=4 , do_sample=UpperCAmelCase__ , early_stopping=UpperCAmelCase__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
__SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
__SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ )
# change to intended input
__SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
__SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
__SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
__SCREAMING_SNAKE_CASE = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
__SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors="pt" )
__SCREAMING_SNAKE_CASE = model.generate(
input_ids=dct["input_ids"].to(UpperCAmelCase__ ) , attention_mask=dct["attention_mask"].to(UpperCAmelCase__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
__SCREAMING_SNAKE_CASE = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
assert generated == expected_en
| 553 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
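
# Usage sketch (not part of the original module): MarianConfig() with no
# arguments reproduces the Helsinki-NLP/opus-mt-* defaults, e.g.
#   cfg = MarianConfig()
#   cfg.d_model, cfg.encoder_layers, cfg.vocab_size  # -> 1024, 12, 58101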
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: 'batch'}
                common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs["decoder_input_ids"] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f"past_key_values.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f"present.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
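
if __name__ == "__main__":
    # Usage sketch (not part of the original module): for the default seq2seq
    # task without past key values, MarianOnnxConfig.inputs maps every model
    # input to its dynamic (symbolic) axes for the ONNX exporter.
    onnx_config = MarianOnnxConfig(MarianConfig(), task="default")
    print(dict(onnx_config.inputs))
    # e.g. {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, ...}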
| 620 |
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 558 | 0 |
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum through the grid in `filename`, moving only
    right and down, using bottom-up dynamic programming (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(''',''')] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    # cells in the first row and first column can be reached only one way
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    # every inner cell extends the cheaper of the paths from above or from the left
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
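
# Sanity-check sketch (not part of the original solution): the same recurrence
# on an in-memory 2x2 grid [[1, 2], [3, 4]]. The two monotone paths cost
# 1+2+4 = 7 and 1+3+4 = 8, so the minimum is 7.
def _min_path_sum(grid):
    n = len(grid[0])
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert _min_path_sum([[1, 2], [3, 4]]) == 7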
if __name__ == "__main__":
print(f'{solution() = }')
| 625 |
"""simple docstring"""
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625 | 1 |
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
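
# Illustration (not from the original file): the guarded-import pattern used
# throughout this module, reduced to a generic standalone sketch.
# "some_optional_backend" is a hypothetical package name; when it is missing,
# a placeholder is bound instead, mirroring the dummy_*_objects fallbacks above.
import importlib.util

if importlib.util.find_spec("some_optional_backend") is not None:
    import some_optional_backend
else:
    some_optional_backend = None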
| 346 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12])
    7.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Round the requested pixel size up to a multiple of scale_factor**2 and
    return the corresponding latent height/width."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
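
# Quick check of the rounding behaviour (values chosen for illustration):
# 768 is already a multiple of 8**2 = 64, so 768 // 64 * 8 == 96 latent cells,
# while 700 rounds up to 11 * 8 == 88.
assert get_new_h_w(768, 768) == (96, 96)
assert get_new_h_w(700, 700) == (88, 88)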
class KandinskyPipeline(DiffusionPipeline):
    def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,) -> Any:
lowerCAmelCase__ : List[str] = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else 1
# get prompt text embeddings
lowerCAmelCase__ : Any = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,truncation=__UpperCAmelCase ,max_length=77 ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = text_inputs.input_ids
lowerCAmelCase__ : Optional[int] = self.tokenizer(__UpperCAmelCase ,padding="""longest""" ,return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase__ : List[Any] = text_input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = text_inputs.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = prompt_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[int] = text_encoder_hidden_states.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Union[str, Any] = text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : List[str]
if negative_prompt is None:
lowerCAmelCase__ : Tuple = [""""""] * batch_size
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="""
F""" {type(__UpperCAmelCase )}.""" )
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCAmelCase__ : int = negative_prompt
lowerCAmelCase__ : List[str] = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,max_length=77 ,truncation=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Dict = uncond_input.input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = uncond_input.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : int = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
lowerCAmelCase__ : Optional[Any] = negative_prompt_embeds.repeat(1 ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.shape[1]
lowerCAmelCase__ : Tuple = uncond_text_encoder_hidden_states.repeat(1 ,__UpperCAmelCase ,1 )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,__UpperCAmelCase ,-1 )
lowerCAmelCase__ : List[str] = uncond_text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase_ ( self ,__UpperCAmelCase=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCAmelCase__ : Any = torch.device(F"""cuda:{gpu_id}""" )
lowerCAmelCase__ : Union[str, Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCAmelCase__ : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase__ : Optional[int] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = cpu_offload_with_hook(__UpperCAmelCase ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
if self.safety_checker is not None:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = cpu_offload_with_hook(self.safety_checker ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
lowerCAmelCase__ : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ) -> Optional[int]:
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = 1
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
lowerCAmelCase__ : str = self._execution_device
lowerCAmelCase__ : List[Any] = batch_size * num_images_per_prompt
lowerCAmelCase__ : int = guidance_scale > 1.0
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = self._encode_prompt(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = torch.cat(__UpperCAmelCase ,dim=0 )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = torch.cat(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[Any] = negative_image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=__UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase ,device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps
lowerCAmelCase__ : Dict = self.unet.config.in_channels
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_new_h_w(__UpperCAmelCase ,__UpperCAmelCase ,self.movq_scale_factor )
# create initial latent
lowerCAmelCase__ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ : Optional[Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCAmelCase__ : Any = self.unet(
sample=__UpperCAmelCase ,timestep=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,added_cond_kwargs=__UpperCAmelCase ,return_dict=__UpperCAmelCase ,)[0]
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = noise_pred.chunk(2 )
lowerCAmelCase__ , lowerCAmelCase__ : int = variance_pred.chunk(2 )
lowerCAmelCase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase__ : str = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : Optional[int] = self.scheduler.step(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,generator=__UpperCAmelCase ,).prev_sample
# post-processing
lowerCAmelCase__ : int = self.movq.decode(__UpperCAmelCase ,force_not_quantize=__UpperCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCAmelCase__ : Tuple = image * 0.5 + 0.5
lowerCAmelCase__ : str = image.clamp(0 ,1 )
lowerCAmelCase__ : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ : int = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
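# A minimal sketch of the classifier-free-guidance step used in the denoising
# loop above, assuming equally shaped unconditional and text-conditioned noise
# predictions (the function name is illustrative, not from the pipeline):
import numpy as np

def apply_classifier_free_guidance(noise_uncond: np.ndarray, noise_text: np.ndarray, guidance_scale: float) -> np.ndarray:
    # Move the estimate past the unconditional prediction toward the
    # text-conditioned one, scaled by guidance_scale.
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

assert np.allclose(apply_classifier_free_guidance(np.zeros(2), np.ones(2), 4.0), np.full(2, 4.0))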
| 160 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=32 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=512 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase="None" ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> List[Any]:
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : int = seq_length
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[Any] = use_token_type_ids
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Tuple = type_vocab_size
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : int = num_labels
lowerCAmelCase__ : Dict = num_choices
lowerCAmelCase__ : List[str] = relative_attention
lowerCAmelCase__ : Any = position_biased_input
lowerCAmelCase__ : str = pos_att_type
lowerCAmelCase__ : Optional[int] = scope
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
lowerCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
lowerCAmelCase__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> Any:
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Optional[int] = DebertaVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Dict = DebertaVaForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Any = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : Optional[int] = self.num_labels
lowerCAmelCase__ : Dict = DebertaVaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Any = DebertaVaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Dict = DebertaVaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(
__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,start_positions=__UpperCAmelCase ,end_positions=__UpperCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = DebertaVaForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowercase : int = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Any = True
__lowercase : Tuple = False
__lowercase : int = False
__lowercase : List[Any] = False
__lowercase : Any = False
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : List[Any] = DebertaVaModelTester(self )
lowerCAmelCase__ : Union[str, Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=37 )
def UpperCAmelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Dict = DebertaVaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[int] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
lowerCAmelCase__ : List[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ : List[str] = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,__UpperCAmelCase ,atol=1E-4 ) ,F"""{output[:, 1:4, 1:4]}""" )
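# The slow integration test above checks a 3x3 slice of hidden states against
# golden values with an absolute tolerance; the same pattern in isolation
# (the drift value is illustrative):
import torch

expected = torch.tensor([[0.2356, 0.1948, 0.0369]])
observed = expected + 5e-5  # simulate small cross-device numerical drift
assert torch.allclose(observed, expected, atol=1e-4)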
| 160 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _UpperCamelCase( yaml.SafeLoader ):
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Dict = [self.constructed_objects[key_node] for key_node, _ in node.value]
__a : Dict = [tuple(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else key for key in keys]
__a : Optional[int] = Counter(SCREAMING_SNAKE_CASE__ )
__a : Dict = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ):
'''simple docstring'''
__a : List[Any] = super().construct_mapping(SCREAMING_SNAKE_CASE__ , deep=SCREAMING_SNAKE_CASE__ )
self._check_no_duplicates_on_constructed_node(SCREAMING_SNAKE_CASE__ )
return mapping
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
__a : List[str] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
__a : int = full_content[1:].index('---' ) + 1
__a : Any = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCamelCase_ )
class _UpperCamelCase( __lowerCamelCase ):
# class attributes
__SCREAMING_SNAKE_CASE : List[Any] = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def __lowerCAmelCase ( cls : Dict , SCREAMING_SNAKE_CASE__ : Path ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as readme_file:
__a , __a : Optional[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(SCREAMING_SNAKE_CASE__ )
else:
return cls()
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Path ):
'''simple docstring'''
if path.exists():
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as readme_file:
__a : Optional[Any] = readme_file.read()
else:
__a : str = None
__a : Optional[int] = self._to_readme(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if readme_content is not None:
__a , __a : str = _split_yaml_from_readme(SCREAMING_SNAKE_CASE__ )
__a : List[str] = '---\n' + self.to_yaml_string() + '---\n' + content
else:
__a : Tuple = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def __lowerCAmelCase ( cls : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Any = yaml.load(SCREAMING_SNAKE_CASE__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
__a : str = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__ , encoding='utf-8' , ).decode('utf-8' )
SCREAMING_SNAKE_CASE__ = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
SCREAMING_SNAKE_CASE__ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
SCREAMING_SNAKE_CASE__ = ap.parse_args()
SCREAMING_SNAKE_CASE__ = Path(args.readme_filepath)
SCREAMING_SNAKE_CASE__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 47 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCamelCase : int , _lowerCamelCase : int ):
if b == 0:
return (1, 0)
((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , a % b )
_lowerCAmelCase = a // b
return (y, x - k * y)
def A_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase = na * na
_lowerCAmelCase = ra * x * na + ra * y * na
return (n % m + m) % m
def A_ ( _lowerCamelCase : int , _lowerCamelCase : int ):
((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , _lowerCamelCase )
if b < 0:
_lowerCAmelCase = (b % n + n) % n
return b
def A_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
_lowerCAmelCase , _lowerCAmelCase = invert_modulo(_lowerCamelCase , _lowerCamelCase ), invert_modulo(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase = na * na
_lowerCAmelCase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
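# The record above is not runnable as written: its signatures reuse one
# mangled name per parameter list and the bodies reference names that are
# never bound. A minimal readable sketch follows; the function names come
# from the testmod() calls, the parameter names are assumptions:
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) with a * x + b * y == gcd(a, b).
    if b == 0:
        return (1, 0)
    x, y = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)

def invert_modulo(a: int, n: int) -> int:
    # Multiplicative inverse of a modulo n, assuming gcd(a, n) == 1.
    b, _ = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b

def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    # Smallest non-negative n with n % n1 == r1 and n % n2 == r2,
    # for coprime moduli n1 and n2.
    x, y = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m

def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    # Same result, computed via two modular inverses instead.
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m

assert chinese_remainder_theorem(5, 1, 7, 3) == 31  # 31 % 5 == 1 and 31 % 7 == 3
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31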
| 309 | 0 |
'''simple docstring'''
def _lowerCAmelCase( UpperCAmelCase_ : str ) -> int:
assert column_title.isupper()
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(UpperCAmelCase_ ) - 1
lowerCAmelCase__ = 0
while index >= 0:
lowerCAmelCase__ = (ord(column_title[index] ) - 64) * pow(26 , UpperCAmelCase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
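# A readable sketch of the Excel-title conversion above (the record's
# assignment targets are mangled); this is the equivalent left-to-right
# Horner form of the same base-26 conversion with digits A..Z:
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    for ch in column_title:
        answer = answer * 26 + (ord(ch) - 64)  # 'A' -> 1, ..., 'Z' -> 26
    return answer

assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28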
| 702 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = """T5Config"""
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
| 211 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : List[str] = logging.get_logger(__name__)
def snake_case__ ( UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def snake_case__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] ):
lowerCAmelCase__ :int = to_pil_image(UpperCAmelCase )
lowerCAmelCase__ ,lowerCAmelCase__ :List[str] = pil_image.size
lowerCAmelCase__ :str = pytesseract.image_to_data(UpperCAmelCase , lang=UpperCAmelCase , output_type="dict" , config=UpperCAmelCase )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ :Optional[int] = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
lowerCAmelCase__ :Dict = [idx for idx, word in enumerate(UpperCAmelCase ) if not word.strip()]
lowerCAmelCase__ :Any = [word for idx, word in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase__ :Tuple = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase__ :str = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase__ :Optional[Any] = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
lowerCAmelCase__ :List[str] = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase__ :List[str] = []
for x, y, w, h in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase__ :Optional[int] = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase )
# finally, normalize the bounding boxes
lowerCAmelCase__ :str = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
assert len(UpperCAmelCase ) == len(UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
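# A readable restatement of the box normalization used above: LayoutLM-style
# models expect coordinates scaled to a fixed 1000 x 1000 grid regardless of
# the source image size (names reconstructed from the function body):
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

assert normalize_box([50, 100, 150, 200], width=500, height=400) == [100, 250, 300, 500]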
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = ['''pixel_values''']
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = "" , **_lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowerCAmelCase__ :int = size if size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ :str = get_size_dict(_lowerCAmelCase )
lowerCAmelCase__ :List[Any] = do_resize
lowerCAmelCase__ :List[str] = size
lowerCAmelCase__ :Any = resample
lowerCAmelCase__ :Union[str, Any] = do_rescale
lowerCAmelCase__ :int = rescale_value
lowerCAmelCase__ :Optional[int] = do_normalize
lowerCAmelCase__ :Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ :Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ :Union[str, Any] = apply_ocr
lowerCAmelCase__ :Any = ocr_lang
lowerCAmelCase__ :Tuple = tesseract_config
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase__ :int = (size["height"], size["width"])
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase=None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ :Dict = size if size is not None else self.size
lowerCAmelCase__ :int = get_size_dict(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = resample if resample is not None else self.resample
lowerCAmelCase__ :Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ :int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ :Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ :Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ :Tuple = image_std if image_std is not None else self.image_std
lowerCAmelCase__ :List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase__ :List[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase__ :Tuple = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase__ :Dict = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
lowerCAmelCase__ :List[str] = [to_numpy_array(_lowerCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
lowerCAmelCase__ :List[Any] = []
lowerCAmelCase__ :Tuple = []
for image in images:
lowerCAmelCase__ ,lowerCAmelCase__ :List[Any] = apply_tesseract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
words_batch.append(_lowerCAmelCase )
boxes_batch.append(_lowerCAmelCase )
if do_resize:
lowerCAmelCase__ :Tuple = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ :Dict = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ :List[Any] = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
lowerCAmelCase__ :List[str] = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
lowerCAmelCase__ :Optional[int] = BatchFeature(data={"pixel_values": images} , tensor_type=_lowerCAmelCase )
if apply_ocr:
lowerCAmelCase__ :Dict = words_batch
lowerCAmelCase__ :Union[str, Any] = boxes_batch
return data
| 145 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = '''EncodecFeatureExtractor'''
A = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = self.feature_extractor
lowerCAmelCase__ :Tuple = False
def snake_case_ ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCAmelCase , language=_lowerCAmelCase , no_timestamps=_lowerCAmelCase )
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("sampling_rate" , _lowerCAmelCase )
lowerCAmelCase__ :Dict = kwargs.pop("text" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :Optional[int] = args[0]
lowerCAmelCase__ :Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCAmelCase__ :Any = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
if audio is not None:
lowerCAmelCase__ :Tuple = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase__ :List[str] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCAmelCase__ :int = audio_inputs["padding_mask"]
return inputs
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("padding_mask" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :int = args[0]
lowerCAmelCase__ :List[str] = args[1:]
if audio_values is not None:
return self._decode_audio(_lowerCAmelCase , padding_mask=_lowerCAmelCase )
else:
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = to_numpy(_lowerCAmelCase )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ :Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = to_numpy(_lowerCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase__ :str = seq_len - padding_mask.shape[-1]
lowerCAmelCase__ :Union[str, Any] = 1 - self.feature_extractor.padding_value
lowerCAmelCase__ :Optional[Any] = np.pad(_lowerCAmelCase , ((0, 0), (0, difference)) , "constant" , constant_values=_lowerCAmelCase )
lowerCAmelCase__ :Union[str, Any] = audio_values.tolist()
for i in range(_lowerCAmelCase ):
lowerCAmelCase__ :str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase__ :List[Any] = sliced_audio.reshape(_lowerCAmelCase , -1 )
return audio_values
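# A NumPy sketch of the padding-removal idea in _decode_audio above: the
# padding mask is first padded up to the generated length with NON-padding
# values, then only unmasked samples are kept (shapes are illustrative):
import numpy as np

padding_value = 0
audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
padding_mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])  # shorter than seq_len
difference = audio.shape[-1] - padding_mask.shape[-1]
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
trimmed = [a[:, m != padding_value].reshape(a.shape[0], -1) for a, m in zip(audio, padding_mask)]
assert [t.shape for t in trimmed] == [(1, 6), (1, 4)]  # second clip drops its 2 padded samples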
| 145 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Any = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase=1_25 , lowerCamelCase=None , **lowerCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ = [F"""<extra_id_{i}>""" for i in range(lowerCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ = len(set(filter(lambda lowerCamelCase : bool("extra_id" in str(lowerCamelCase ) ) , lowerCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
snake_case__ = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
snake_case__ = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
snake_case__ = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
super().__init__(
eos_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , extra_ids=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ = extra_ids
snake_case__ = 2**8 # utf is 8 bits
# define special tokens dict
snake_case__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case__ = len(self.special_tokens_encoder )
snake_case__ = len(lowerCamelCase )
for i, token in enumerate(lowerCamelCase ):
snake_case__ = self.vocab_size + i - n
snake_case__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def A_ ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def A_ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase )) + [1]
return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1]
def A_ ( self , lowerCamelCase ):
if len(lowerCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A_ ( self , lowerCamelCase , lowerCamelCase = None ):
snake_case__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self , lowerCamelCase , lowerCamelCase = None ):
snake_case__ = self._add_eos_if_not_present(lowerCamelCase )
if token_ids_a is None:
return token_ids_a
else:
snake_case__ = self._add_eos_if_not_present(lowerCamelCase )
return token_ids_a + token_ids_a
def A_ ( self , lowerCamelCase ):
snake_case__ = [chr(lowerCamelCase ) for i in text.encode("utf-8" )]
return tokens
def A_ ( self , lowerCamelCase ):
if token in self.special_tokens_encoder:
snake_case__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase ) != 1:
snake_case__ = self.unk_token_id
else:
snake_case__ = ord(lowerCamelCase ) + self._num_special_tokens
return token_id
def A_ ( self , lowerCamelCase ):
if index in self.special_tokens_decoder:
snake_case__ = self.special_tokens_decoder[index]
else:
snake_case__ = chr(index - self._num_special_tokens )
return token
def A_ ( self , lowerCamelCase ):
snake_case__ = B""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case__ = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
snake_case__ = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
snake_case__ = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
snake_case__ = token.encode("utf-8" )
else:
snake_case__ = bytes([ord(lowerCamelCase )] )
bstring += tok_string
snake_case__ = bstring.decode("utf-8" , errors="ignore" )
return string
def A_ ( self , lowerCamelCase , lowerCamelCase = None ):
return ()
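# A worked example of the byte-level mapping above: every UTF-8 byte becomes
# one token, offset by the number of special tokens (pad=0, eos=1, unk=2 in
# the class above, so the offset is 3):
num_special_tokens = 3
token_ids = [b + num_special_tokens for b in "hi".encode("utf-8")]
assert token_ids == [107, 108]  # ord('h') == 104, ord('i') == 105
decoded = bytes(i - num_special_tokens for i in token_ids).decode("utf-8")
assert decoded == "hi"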
| 530 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
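# A readable restatement of the Ohm's-law solver above (the record's def line
# reuses one mangled name for all three parameters; the descriptive names are
# assumptions): exactly one of the three quantities must be 0, and the
# missing one is solved from V = I * R.
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

assert ohms_law(voltage=10, current=2, resistance=0) == {"resistance": 5.0}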
| 530 | 1 |
"""simple docstring"""
from math import sqrt
def a__ ( lowerCAmelCase ) -> int:
UpperCAmelCase__ : str = 0
for i in range(1 , int(sqrt(lowerCAmelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(lowerCAmelCase ):
total += i + n // i
elif i == sqrt(lowerCAmelCase ):
total += i
return total - n
def a__ ( lowerCAmelCase = 1_00_00 ) -> int:
UpperCAmelCase__ : Union[str, Any] = sum(
i
for i in range(1 , lowerCAmelCase )
if sum_of_divisors(sum_of_divisors(lowerCAmelCase ) ) == i and sum_of_divisors(lowerCAmelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
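# A readable sketch of the Project Euler 21 solution above. Note the record's
# generator expression passes the limit into sum_of_divisors where the loop
# variable i is clearly intended (another mangling artifact); the version
# below uses i. Here d(n) is the sum of proper divisors of n.
from math import sqrt

def sum_of_proper_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n)) + 1):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n

def amicable_sum(limit: int = 10000) -> int:
    # n is amicable iff d(d(n)) == n and d(n) != n.
    return sum(
        i
        for i in range(1, limit)
        if sum_of_proper_divisors(sum_of_proper_divisors(i)) == i and sum_of_proper_divisors(i) != i
    )

assert amicable_sum(300) == 504  # the pair 220 and 284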
| 182 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = FunnelConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(F'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ = FunnelBaseModel(SCREAMING_SNAKE_CASE ) if base_model else FunnelModel(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
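# A readable restatement of the conversion routine above (the record's def
# line reuses one mangled name for all four parameters; the names below
# follow the argparse wiring in __main__), sketched under the assumption that
# load_tf_weights_in_funnel takes (model, config, tf_checkpoint_path):
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel

def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)  # load the TF weights
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)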
| 205 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ (_a : int , _a : Optional[int] , _a : Any , _a : int , _a : List[str] ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
UpperCAmelCase = TapasConfig.from_json_file(__A )
# set absolute/relative position embeddings parameter
UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
UpperCAmelCase = TapasForQuestionAnswering(config=__A )
elif task == "WTQ":
# run_task_main.py hparams
UpperCAmelCase = 4
UpperCAmelCase = True
# hparam_utils.py hparams
UpperCAmelCase = 0.66_4694
UpperCAmelCase = 0.20_7951
UpperCAmelCase = 0.12_1194
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = 0.035_2513
UpperCAmelCase = TapasForQuestionAnswering(config=__A )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
UpperCAmelCase = 4
UpperCAmelCase = False
# hparam_utils.py hparams
UpperCAmelCase = 36.4519
UpperCAmelCase = 0.90_3421
UpperCAmelCase = 222.088
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 0.76_3141
UpperCAmelCase = TapasForQuestionAnswering(config=__A )
elif task == "TABFACT":
UpperCAmelCase = TapasForSequenceClassification(config=__A )
elif task == "MLM":
UpperCAmelCase = TapasForMaskedLM(config=__A )
elif task == "INTERMEDIATE_PRETRAINING":
UpperCAmelCase = TapasModel(config=__A )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__A , __A , __A )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(__A )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''' , model_max_length=5_1_2 )
tokenizer.save_pretrained(__A )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
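# The mangled assignments in the WTQ and WIKISQL_SUPERVISED branches above
# originally set TapasConfig fields (for example num_aggregation_labels and
# answer_loss_cutoff) before building the model; the task-to-model dispatch
# itself reduces to a lookup (class names shown as strings for illustration):
TASK_TO_MODEL_CLASS = {
    "SQA": "TapasForQuestionAnswering",
    "WTQ": "TapasForQuestionAnswering",
    "WIKISQL_SUPERVISED": "TapasForQuestionAnswering",
    "TABFACT": "TapasForSequenceClassification",
    "MLM": "TapasForMaskedLM",
    "INTERMEDIATE_PRETRAINING": "TapasModel",
}

def pick_model_class(task: str) -> str:
    if task not in TASK_TO_MODEL_CLASS:
        raise ValueError(f"Task {task} not supported.")
    return TASK_TO_MODEL_CLASS[task]

assert pick_model_class("TABFACT") == "TapasForSequenceClassification"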
| 701 |
'''simple docstring'''
def snake_case_ (_a : list[list[int]] , _a : int , _a : int , _a : list[int] ):
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case_ (_a : list[list[int]] , _a : list[int] , _a : int ):
# Base Case
if curr_ind == len(_a ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(_a ) ):
if valid_connection(_a , _a , _a , _a ):
# Insert current vertex into path as next transition
UpperCAmelCase = next_ver
# Validate created path
if util_hamilton_cycle(_a , _a , curr_ind + 1 ):
return True
# Backtrack
UpperCAmelCase = -1
return False
def snake_case_ (_a : list[list[int]] , _a : int = 0 ):
UpperCAmelCase = [-1] * (len(_a ) + 1)
# initialize start and end of path with starting index
UpperCAmelCase = UpperCAmelCase = start_index
# evaluate; if a Hamiltonian cycle is found return the path, otherwise return an empty array
return path if util_hamilton_cycle(_a , _a , 1 ) else []
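# A readable sketch of the Hamiltonian-cycle backtracking above (function and
# parameter names reconstructed from the bodies; the record's defs reuse one
# mangled name per signature):
def valid_connection(graph, next_ver, curr_ind, path):
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False  # no edge from the previously placed vertex
    return not any(vertex == next_ver for vertex in path)  # not visited yet

def util_hamilton_cycle(graph, path, curr_ind):
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1  # can we close the cycle?
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            path[curr_ind] = next_ver
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            path[curr_ind] = -1  # backtrack
    return False

def hamilton_cycle(graph, start_index=0):
    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start_index
    return path if util_hamilton_cycle(graph, path, 1) else []

assert hamilton_cycle([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) == [0, 1, 2, 0]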
| 358 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = PILImageResampling.BICUBIC , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = 1 / 255 , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = True , **UpperCAmelCase__ , ):
super().__init__(**UpperCAmelCase__ )
A__ = size if size is not None else {"shortest_edge": 224}
A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = PILImageResampling.BICUBIC , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
A__ = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
A__ = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
A__ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ):
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCAmelCase__ , param_name="size" , default_to_square=UpperCAmelCase__ )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(UpperCAmelCase__ , param_name="crop_size" , default_to_square=UpperCAmelCase__ )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
A__ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
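# A sketch of the "shortest_edge" resize rule used above: scale the image so
# its shorter side equals size while preserving the aspect ratio. This is an
# approximation of the library helper, which may round differently:
def shortest_edge_output_size(height: int, width: int, size: int) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_short, new_long = size, int(size * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)

assert shortest_edge_output_size(480, 640, 224) == (224, 298)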
| 491 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
class UpperCamelCase :
def __init__( self ):
A__ = False
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if not self.initialized:
A__ = RagRetriever(
UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )
A__ = True
def __A ( self ):
self.retriever.index.init_index()
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ , A__ = self.retriever._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
return doc_ids, retrieved_doc_embeds
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ):
if index is not None and index.is_initialized() and len(UpperCAmelCase__ ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )
A__ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
for worker in self.retrieval_workers
] )
def __A ( self ):
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A__ , A__ = ray.get(random_worker.retrieve.remote(UpperCAmelCase__ , UpperCAmelCase__ ) )
else:
A__ , A__ = self._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ )
@classmethod
def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ):
return super(UpperCAmelCase__ , cls ).get_tokenizers(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ):
A__ = kwargs.pop("config" , UpperCAmelCase__ ) or RagConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = RagTokenizer.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
A__ = rag_tokenizer.question_encoder
A__ = rag_tokenizer.generator
if indexed_dataset is not None:
A__ = "custom"
A__ = CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ )
else:
A__ = cls._build_index(UpperCAmelCase__ )
return cls(
UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , retrieval_workers=UpperCAmelCase__ , index=UpperCAmelCase__ , )
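# A sketch of the worker-routing idea above: each retrieval query goes to one
# of N stateful Ray actors picked uniformly at random, falling back to
# in-process retrieval when no workers exist (names are illustrative):
import random

def pick_retrieval_worker(workers: list):
    if not workers:
        return None  # caller falls back to local retrieval
    return workers[random.randint(0, len(workers) - 1)]

assert pick_retrieval_worker([]) is None
assert pick_retrieval_worker(["w0", "w1"]) in {"w0", "w1"}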
| 491 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCamelCase__ = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
UpperCamelCase__ = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
UpperCamelCase__ = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
| 703 |
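# A quick sanity check for the precision_at_10 metric defined above, assuming
# that function is in scope: with identical English and Indic sentence
# vectors, every query's nearest neighbour is itself, so precision@10 must be
# 1.0. The vectors are random toy data, not real sentence embeddings.
import numpy as np

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sentvecs = rng.normal(size=(32, 8))
    print(precision_at_10(sentvecs, sentvecs))  # expected output: 1.0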
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=1_0_3,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 143 | 0 |
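# A standalone illustration of the id layout implemented by the tokenizer
# above: ids 0..offset-1 are reserved for special tokens, and every
# SentencePiece id is shifted up by `offset`. No real SentencePiece model is
# loaded; SP_PIECE_TO_ID is a stand-in dict used purely for the demo.
OFFSET = 103
SP_PIECE_TO_ID = {"▁hello": 0, "▁world": 1}  # pretend SentencePiece vocab
SPECIAL_TOKEN_TO_ID = {"<pad>": 0, "</s>": 1, "<mask_1>": 2, "<mask_2>": 3}


def token_to_id(token: str) -> int:
    if token in SPECIAL_TOKEN_TO_ID:
        return SPECIAL_TOKEN_TO_ID[token]
    return SP_PIECE_TO_ID[token] + OFFSET


if __name__ == "__main__":
    print(token_to_id("<pad>"))   # 0: reserved special token
    print(token_to_id("▁hello"))  # 103: SentencePiece id 0 shifted by the offset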
__SCREAMING_SNAKE_CASE : int = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__SCREAMING_SNAKE_CASE : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__SCREAMING_SNAKE_CASE : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 670 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """simple docstring"""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202 | 0 |
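# Example run of neville_interpolate as restored above. Note that the table
# keeps its base case in column 1, so the returned value interpolates through
# the last n - 1 sample points; with five samples of the quadratic
# y = x**2 + 2, that is still exact, and the value at x0 = 5 is 27.
if __name__ == "__main__":
    x_points = [1, 2, 3, 4, 6]
    y_points = [3, 6, 11, 18, 38]
    value, _table = neville_interpolate(x_points, y_points, 5)
    print(value)  # 27.0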
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 648 |
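# A minimal concrete subcommand built on the abstract base above. The "env"
# command and its behaviour are hypothetical; the argparse wiring just shows
# how register_subcommand() and run() are meant to cooperate.
import platform
from argparse import ArgumentParser


class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        env_parser = parser.add_parser("env", help="Print environment info.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("Python:", platform.python_version())


if __name__ == "__main__":
    parser = ArgumentParser("cli")
    subparsers = parser.add_subparsers()
    EnvCommand.register_subcommand(subparsers)
    args = parser.parse_args(["env"])
    args.func(args).run()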
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_66_02_54])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 6_0)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    '''simple docstring'''
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 648 | 1 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """simple docstring"""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest
doctest.testmod() | 483 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    '''simple docstring'''

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    '''simple docstring'''

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """simple docstring"""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main() | 436 | 0 |
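# Cross-check of the pure-Python SHA256 class above against hashlib on a few
# short inputs, assuming the class is importable under the name SHA256.
import hashlib

if __name__ == "__main__":
    for msg in [b"", b"abc", b"Test String"]:
        assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
    print("all digests match hashlib")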
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176 |
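# Example run of min_path_sum from above on the classic 3x3 grid: the
# cheapest top-left to bottom-right path (moving only right or down) is
# 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
if __name__ == "__main__":
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum(grid))  # 7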
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 176 | 1 |
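# The PyTorch<->Flax equivalence checks above all reduce to one recipe:
# convert the weights, run both models on identical inputs, and compare a
# slice of the outputs within a tolerance. A framework-free sketch of that
# final comparison step, using plain numpy arrays as stand-in model outputs.
import numpy as np


def assert_close(fx_output, pt_output, tol=4e-2):
    # mirrors assert_almost_equals: compare the maximum absolute difference
    diff = np.abs(fx_output - pt_output).max()
    assert diff <= tol, f"Max diff is {diff}"


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    logits = rng.normal(size=(2, 5)).astype(np.float32)
    assert_close(logits, logits + 1e-3)  # passes: well under the 4e-2 tolerance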
def move_tower(height, from_pole, to_pole, with_pole):
    """simple docstring"""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """simple docstring"""
    print('''moving disk from''', fp, '''to''', tp)


def main():
    """simple docstring"""
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height, '''A''', '''B''', '''C''')
if __name__ == "__main__":
main()
| 87 | '''simple docstring'''
def binary_and(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 494 | 0 |
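# Two example calls to binary_and from above: 25 (0b11001) and 32 (0b100000)
# share no set bits, so the result is all zeros, matching Python's built-in
# & operator.
if __name__ == "__main__":
    print(binary_and(25, 32))  # 0b000000
    print(bin(25 & 32))        # 0b0 (same value via the built-in operator)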
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCamelCase = "\\n Text data.\n Second line of data."
__lowerCamelCase = "file"
@pytest.fixture(scope='session' )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8' )
    with zstd.open(path, 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )


@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
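# Minimal usage of the helpers exercised by the tests above: cached_path
# returns a local file path unchanged and, when extract_compressed_file=True
# is set on the DownloadConfig, transparently extracts archives into the
# cache. The path argument below is illustrative.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path


def resolve(path_or_url):
    download_config = DownloadConfig(extract_compressed_file=True)
    return cached_path(path_or_url, download_config=download_config)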
| 536 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = 'M-CLIP'

    def __init__(self, transformerDimSize=10_24, imageDimSize=7_68, **kwargs) -> List[str]:
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs) -> List[Any]:
        super().__init__(config, *args, **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims )

    def forward(self, input_ids, attention_mask) -> Tuple:
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
| 536 | 1 |
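# A sketch of running the MultilingualCLIP wrapper above end to end: tokenize
# text with an XLM-R tokenizer and project the mean-pooled transformer output
# into the CLIP image space. Loading real weights is out of scope here, so
# the model and tokenizer are passed in as arguments.
import torch


def embed_texts(model, tokenizer, texts):
    batch = tokenizer(texts, padding=True, return_tensors="pt")
    with torch.no_grad():
        projected, _raw = model(batch["input_ids"], batch["attention_mask"])
    return projected  # shape: (batch_size, config.numDims)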
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )

        super().__init__(*args, **kwargs )

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt" ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device ),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device ),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "" )
        sequence = re.sub(R"<.*?>", "", sequence, count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
return sequence["answer"] | 337 |
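# How the tool above is typically invoked once instantiated: PipelineTool's
# __call__ chains encode -> forward -> decode. The image path is a
# placeholder, and a real run needs Pillow plus the Donut checkpoint, so this
# is a sketch rather than a runnable test.
from PIL import Image


def ask_document(tool, image_path, question):
    document = Image.open(image_path)
    return tool(document, question)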
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase ="""▁"""
__UpperCAmelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], "<unk>" )
        self.assertEqual(vocab_keys[1], "<s>" )
        self.assertEqual(vocab_keys[-1], "<pad>" )
        self.assertEqual(len(vocab_keys ), 10_02 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_00 )

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True )

        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [2_85, 46, 10, 1_70, 3_82], )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_85_36, 22_60, 1_01]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , ) | 337 | 1 |
'''simple docstring'''
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
_lowerCAmelCase = "huggingface-tools/default-prompts"
_lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def UpperCamelCase ( a , a , a="run" ) -> str:
'''simple docstring'''
if prompt_or_repo_id is None:
__magic_name__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , a ) is not None:
return prompt_or_repo_id
__magic_name__ = cached_file(
a , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(a , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
| 716 |
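# Example use of download_prompt as defined above: a string containing
# whitespace is treated as the prompt itself and returned unchanged, while a
# bare repo id would trigger a Hub download of the mode's template file.
if __name__ == "__main__":
    inline = download_prompt("Answer the question: <<task>>", agent_name="demo")
    print(inline)  # printed as-is, since the string contains spaces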
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""
        A small test to make sure that inference works in half precision without any problem.
        """
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
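
# Illustrative, standalone sketch of the position-embedding interpolation exercised
# in the test above. It reuses the module-level imports; the 560-pixel input size is
# an arbitrary choice for this sketch, not a value taken from the tests.
if __name__ == "__main__":
    model = ViTModel.from_pretrained("facebook/dino-vits8")
    image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=560)
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(inputs.pixel_values, interpolate_pos_encoding=True)
    # With 8x8 patches, a 560x560 input yields (560 / 8) ** 2 + 1 = 4901 tokens.
    print(outputs.last_hidden_state.shape)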
| 245 | 0 |
"""simple docstring"""
UpperCamelCase__ = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
UpperCamelCase__ = frozenset(['''prompt''', '''negative_prompt'''])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(['''image'''])
UpperCamelCase__ = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
UpperCamelCase__ = frozenset(['''image'''])
UpperCamelCase__ = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
UpperCamelCase__ = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
UpperCamelCase__ = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
UpperCamelCase__ = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
UpperCamelCase__ = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
UpperCamelCase__ = frozenset(['''image''', '''mask_image'''])
UpperCamelCase__ = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
UpperCamelCase__ = frozenset(['''example_image''', '''image''', '''mask_image'''])
UpperCamelCase__ = frozenset(['''class_labels'''])
UpperCamelCase__ = frozenset(['''class_labels'''])
UpperCamelCase__ = frozenset(['''batch_size'''])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(['''batch_size'''])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
UpperCamelCase__ = frozenset(['''prompt''', '''negative_prompt'''])
UpperCamelCase__ = frozenset(['''input_tokens'''])
UpperCamelCase__ = frozenset(['''input_tokens'''])
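
# Illustrative only: pipeline test classes point `params` / `batch_params` at these
# sets (the class name below is hypothetical; `params` and `batch_params` are the
# attributes the diffusers pipeline test mixin reads):
#
#   class MyTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS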
| 227 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( snake_case : int , snake_case : int ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( snake_case : int ):
_lowerCAmelCase:Optional[Any] = []
_lowerCAmelCase:Dict = 11
_lowerCAmelCase:int = int('''1''' + '''0''' * digit_len )
for num in range(snake_case , snake_case ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case , snake_case ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
_lowerCAmelCase:Optional[Any] = 10
return solutions
def UpperCAmelCase ( snake_case : int = 2 ):
_lowerCAmelCase:Optional[int] = 1.0
for fraction in fraction_list(snake_case ):
_lowerCAmelCase:Any = Fraction(snake_case )
result *= frac.denominator / frac.numerator
return int(snake_case )
if __name__ == "__main__":
print(solution())
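    # Sanity check: the four non-trivial two-digit digit-cancelling fractions are
    #   16/64 = 1/4, 19/95 = 1/5, 26/65 = 2/5, 49/98 = 4/8,
    # and their product is 1/100, so solution() returns 100.
    assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100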
| 227 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p itself should be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # M2 = 3 is prime

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
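    # Worked check of the two calls above (both facts are standard):
    #   2**7  - 1 = 127           is prime  -> True
    #   2**11 - 1 = 2047 = 23*89  composite -> False
    assert lucas_lehmer_test(7) is True
    assert lucas_lehmer_test(11) is False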
| 181 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
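
# The determinant check in the tests below uses the standard block-matrix identity
# (a textbook fact, restated here for reference):
#     M = [[A, B], [B^T, C]],   S = C - B^T A^{-1} B,   det(M) = det(A) * det(S)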
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
    unittest.main()
 | 57 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"

    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]

    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
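
# Illustrative only (behavior inferred from the assertions above, not from the
# accelerate docs): _convert_nargs_to_dict turns the flat nargs list into a typed
# dict, e.g.
#   ["--epochs", "3", "--do_train", "False"]  ->  {"epochs": 3, "do_train": False}
# and raises ValueError on a malformed list such as fail_training_script_args,
# where the bare flag "--do_predict" is immediately followed by another flag.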
| 475 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
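
    # Illustrative predictor-corrector sampling loop (the `score_model` call and the
    # tensor shape are placeholders; everything else uses only methods defined above):
    #
    #   scheduler = ScoreSdeVeScheduler()
    #   scheduler.set_timesteps(num_inference_steps)
    #   scheduler.set_sigmas(num_inference_steps)
    #   sample = torch.randn(shape) * scheduler.init_noise_sigma
    #   for t in scheduler.timesteps:
    #       for _ in range(scheduler.config.correct_steps):
    #           score = score_model(sample, t)
    #           sample = scheduler.step_correct(score, sample).prev_sample
    #       score = score_model(sample, t)
    #       sample = scheduler.step_pred(score, t, sample).prev_sample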
| 475 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
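
# Minimal usage sketch (relies only on the defaults defined above):
#   config = NllbMoeConfig()  # vocab_size=128112, d_model=1024, num_experts=128
#   tiny = NllbMoeConfig(encoder_layers=2, decoder_layers=2, num_experts=4)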
| 698 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
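
# Illustrative only: an OnnxConfig like the one above is what the `transformers.onnx`
# export tool consumes, e.g.
#   python -m transformers.onnx --model=microsoft/deberta-v2-xlarge onnx_out/
# (`onnx_out/` is an arbitrary output directory chosen for this sketch).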
| 703 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of a minimum vertex cover using a max-heap on vertex degree."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
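    # Sanity check (not a minimality proof): every edge must touch the cover.
    cover = greedy_min_vertex_cover(graph)
    assert all(u in cover or v in cover for u in graph for v in graph[u])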
| 661 | 0 |
import numpy
# List of input, output pairs
lowercase = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
lowercase = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
lowercase = [2, 4, 1, 5]
lowercase = len(train_data)
lowercase = 0.009
def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta0 + theta1*x1 + ... for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 272 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=None , ) -> List[Any]:
if attention_mask is None:
lowerCamelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __A:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : str ):
lowerCamelCase_ = 2_0
lowerCamelCase_ = model_class_name(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase_ , lowerCamelCase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = model.decode(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] ):
lowerCamelCase_ = 2_0
lowerCamelCase_ = model_class_name(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase_ , lowerCamelCase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __A( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 9_9
def lowercase__ ( self : Dict ):
lowerCamelCase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_config_and_data()
lowerCamelCase_ = FlaxBlenderbotSmallForConditionalGeneration(__UpperCamelCase )
lowerCamelCase_ = lm_model(input_ids=__UpperCamelCase )
lowerCamelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , __UpperCamelCase )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCamelCase_ = FlaxBlenderbotSmallForConditionalGeneration(__UpperCamelCase )
lowerCamelCase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCamelCase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCamelCase_ = lm_model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
lowerCamelCase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , __UpperCamelCase )
def lowercase__ ( self : int ):
lowerCamelCase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCamelCase_ = shift_tokens_right(__UpperCamelCase , 1 , 2 )
lowerCamelCase_ = np.equal(__UpperCamelCase , 1 ).astype(np.floataa ).sum()
lowerCamelCase_ = np.equal(__UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __A( UpperCAmelCase , unittest.TestCase , UpperCAmelCase ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = FlaxBlenderbotSmallModelTester(self )
def lowercase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : Dict=None , **__UpperCamelCase : Tuple ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : Any ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
lowerCamelCase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase_ = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
| 272 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the
    cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
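        # Example (illustrative numbers): with num_train_timesteps=1000 and
        # num_inference_steps=4, step_ratio = 999 / 3 = 333, so the schedule is
        # tensor([999, 666, 333, 0]), evenly spaced and ending exactly at 0.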
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
if prev_timestep is None:
lowerCAmelCase_ = t - 1
lowerCAmelCase_ = self.alphas_cumprod[t]
lowerCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase_ = self.betas[t]
else:
lowerCAmelCase_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase_ = torch.log(torch.clamp(_lowercase , min=1e-2_0 ) )
lowerCAmelCase_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase_ = variance.log()
lowerCAmelCase_ = beta.log()
lowerCAmelCase_ = (predicted_variance + 1) / 2
lowerCAmelCase_ = frac * max_log + (1 - frac) * min_log
return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True) -> Union[UnCLIPSchedulerOutput, Tuple]:
lowerCAmelCase_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase_ = torch.split(_lowercase , sample.shape[1] , dim=1 )
else:
lowerCAmelCase_ = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase_ = t - 1
lowerCAmelCase_ = self.alphas_cumprod[t]
lowerCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase_ = self.betas[t]
lowerCAmelCase_ = self.alphas[t]
else:
lowerCAmelCase_ = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase_ = torch.clamp(
_lowercase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase_ = 0
if t > 0:
lowerCAmelCase_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_lowercase , device=model_output.device )
lowerCAmelCase_ = self._get_variance(
_lowercase , predicted_variance=_lowercase , prev_timestep=_lowercase , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase_ = variance
elif self.variance_type == "learned_range":
lowerCAmelCase_ = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
lowerCAmelCase_ = variance * variance_noise
lowerCAmelCase_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_lowercase , pred_original_sample=_lowercase )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> torch.FloatTensor:
lowerCAmelCase_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase_ = timesteps.to(original_samples.device )
lowerCAmelCase_ = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase_ = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase_ = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
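# Illustrative sketch (not from the original file): `add_noise` above is the
# closed-form DDPM forward process
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# where the flatten/unsqueeze loops only broadcast the per-timestep scalars over
# image-shaped tensors. A hypothetical call (variable names assumed) looks like:
#
#     noise = torch.randn_like(clean_images)                        # (B, C, H, W)
#     timesteps = torch.randint(0, 1000, (clean_images.shape[0],))
#     noisy = scheduler.add_noise(clean_images, noise, timesteps)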
| 721 |
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( __a ):
_lowercase ='''SpeechT5FeatureExtractor'''
_lowercase ='''SpeechT5Tokenizer'''
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> int:
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
lowerCAmelCase_ = kwargs.pop("audio" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("text" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("text_target" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("audio_target" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("sampling_rate" , _UpperCamelCase )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
lowerCAmelCase_ = self.feature_extractor(_UpperCamelCase , *_UpperCamelCase , sampling_rate=_UpperCamelCase , **_UpperCamelCase )
elif text is not None:
lowerCAmelCase_ = self.tokenizer(_UpperCamelCase , **_UpperCamelCase )
else:
lowerCAmelCase_ = None
if audio_target is not None:
lowerCAmelCase_ = self.feature_extractor(audio_target=_UpperCamelCase , *_UpperCamelCase , sampling_rate=_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = targets["input_values"]
elif text_target is not None:
lowerCAmelCase_ = self.tokenizer(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = targets["input_ids"]
else:
lowerCAmelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ = labels
lowerCAmelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
lowerCAmelCase_ = kwargs.pop("input_values" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("input_ids" , _UpperCamelCase )
lowerCAmelCase_ = kwargs.pop("labels" , _UpperCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
lowerCAmelCase_ = self.feature_extractor.pad(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
elif input_ids is not None:
lowerCAmelCase_ = self.tokenizer.pad(_UpperCamelCase , **_UpperCamelCase )
else:
lowerCAmelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(_UpperCamelCase , _UpperCamelCase ) and "input_ids" in labels[0]):
lowerCAmelCase_ = self.tokenizer.pad(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = targets["input_ids"]
else:
lowerCAmelCase_ = self.feature_extractor.feature_size
lowerCAmelCase_ = self.feature_extractor.num_mel_bins
lowerCAmelCase_ = self.feature_extractor.pad(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = feature_size_hack
lowerCAmelCase_ = targets["input_values"]
else:
lowerCAmelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ = labels
lowerCAmelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
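# Hedged usage sketch (not part of the original file; the checkpoint id below is
# an assumption shown only for illustration):
#
#     from transformers import SpeechT5Processor
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello world", return_tensors="pt")
#     # `inputs["input_ids"]` feeds the encoder; passing `audio_target=` or
#     # `text_target=` instead produces `labels` (plus `decoder_attention_mask`),
#     # mirroring the __call__ logic above.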
| 279 | 0 |
'''simple docstring'''
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
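    # Hedged optimization sketch (the name `solution_mod` is mine, not from the
    # original): three-argument pow keeps every intermediate below 10**10, so the
    # ~3000-digit integers produced by i**i for large i are never materialized.
    def solution_mod(limit: int = 1000, digits: int = 10) -> str:
        mod = 10**digits
        return str(sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod)

    assert solution_mod() == solution()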
| 418 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        """simple docstring"""
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
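# Worked example of the resize math above (illustrative): a 480x640 PIL image
# with shortest_edge=288 gives scale = 288 / 480 = 0.6, so (newh, neww) = (288, 384);
# max_size = int(1333 / 800 * 288) = 479 is not exceeded, and rounding plus
# snapping to size_divisor=32 leaves (expected_height, expected_width) = (288, 384).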
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 418 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    """simple docstring"""
    # NOTE: the function and parameter names here are reconstructed from how the
    # values are used in the body; the original identifiers were obfuscated.
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input (tf.sub is the legacy, pre-TF1 name of tf.subtract)
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
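if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): two well-separated
    # Gaussian blobs in 2-D. Note the function above uses the legacy pre-TF1
    # API (tf.Session, tf.placeholder, tf.sub), so it needs a matching runtime.
    import numpy as np

    rng = np.random.default_rng(0)
    points = np.concatenate(
        [rng.normal(loc=0.0, size=(20, 2)), rng.normal(loc=5.0, size=(20, 2))]
    )
    centroids, assignments = tf_k_means_cluster(points, 2)
    print(centroids)
    print(assignments)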
| 710 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def lowerCAmelCase ( self ):
__UpperCamelCase : List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
__UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase : str = bertabert.config.encoder.vocab_size
__UpperCamelCase : Optional[Any] = tokenizer.sep_token_id
__UpperCamelCase : Any = tokenizer.cls_token_id
__UpperCamelCase : Optional[Any] = 1_2_8
__UpperCamelCase : str = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
__UpperCamelCase : Tuple = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
__UpperCamelCase : Dict = train_dataset.select(range(3_2 ) )
__UpperCamelCase : List[Any] = val_dataset.select(range(1_6 ) )
__UpperCamelCase : Optional[int] = 4
def _map_to_encoder_decoder_inputs(_lowerCamelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase : Union[str, Any] = tokenizer(batch['article'] , padding='max_length' , truncation=_lowerCamelCase , max_length=5_1_2 )
__UpperCamelCase : Optional[int] = tokenizer(batch['highlights'] , padding='max_length' , truncation=_lowerCamelCase , max_length=1_2_8 )
__UpperCamelCase : Any = inputs.input_ids
__UpperCamelCase : Optional[Any] = inputs.attention_mask
__UpperCamelCase : Optional[Any] = outputs.input_ids
__UpperCamelCase : List[Any] = outputs.input_ids.copy()
__UpperCamelCase : Optional[Any] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_lowerCamelCase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(_lowerCamelCase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCamelCase ):
__UpperCamelCase : str = pred.label_ids
__UpperCamelCase : Dict = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase : Tuple = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__UpperCamelCase : Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_lowerCamelCase ) )] ) / len(_lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase : str = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCamelCase , batch_size=_lowerCamelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
__UpperCamelCase : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCamelCase , batch_size=_lowerCamelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = SeqaSeqTrainingArguments(
output_dir=_lowerCamelCase , per_device_train_batch_size=_lowerCamelCase , per_device_eval_batch_size=_lowerCamelCase , predict_with_generate=_lowerCamelCase , evaluation_strategy='steps' , do_train=_lowerCamelCase , do_eval=_lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__UpperCamelCase : str = SeqaSeqTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , tokenizer=_lowerCamelCase , )
# start training
trainer.train()
| 287 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'''{solution() = }''')
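    # Sanity check (added for illustration): the sixth prime is 13.
    assert solution(6) == 13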
| 202 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
SCREAMING_SNAKE_CASE__ : Any = ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : int = '''ml.p3.2xlarge'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''accelerate_sagemaker_execution_role'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''hf-sm'''
SCREAMING_SNAKE_CASE__ : Any = '''us-east-1'''
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Dict = '''accelerate-sagemaker-1'''
SCREAMING_SNAKE_CASE__ : Dict = '''1.6'''
SCREAMING_SNAKE_CASE__ : int = '''4.4'''
SCREAMING_SNAKE_CASE__ : int = '''train.py'''
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class a ( unittest.TestCase ):
    def test_args_convert(self):
        """simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
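# For reference (illustrative, inferred from the assertions above):
# _convert_nargs_to_dict(["--epochs", "3", "--do_train", "False"]) yields
# {"epochs": 3, "do_train": False}; values are cast to bool/int/float when possible.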
| 202 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowercase ( __a , __a , unittest.TestCase ):
_UpperCAmelCase = StableDiffusionPanoramaPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
snake_case = DDIMScheduler()
torch.manual_seed(0 )
snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
snake_case = CLIPTextModel(A__ )
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase ( self , A__ , A__=0 ) -> Optional[Any]:
snake_case = torch.manual_seed(A__ )
snake_case = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ) -> List[str]:
snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionPanoramaPipeline(**A__ )
snake_case = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case = self.get_dummy_inputs(A__ )
snake_case = sd_pipe(**A__ ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> List[Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionPanoramaPipeline(**A__ )
snake_case = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case = self.get_dummy_inputs(A__ )
snake_case = '''french fries'''
snake_case = sd_pipe(**A__ , negative_prompt=A__ )
snake_case = output.images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> str:
snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionPanoramaPipeline(**A__ )
snake_case = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case = self.get_dummy_inputs(A__ )
snake_case = sd_pipe(**A__ , view_batch_size=2 )
snake_case = output.images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
snake_case = StableDiffusionPanoramaPipeline(**A__ )
snake_case = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case = self.get_dummy_inputs(A__ )
snake_case = sd_pipe(**A__ ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> int:
snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , skip_prk_steps=A__ )
snake_case = StableDiffusionPanoramaPipeline(**A__ )
snake_case = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case = self.get_dummy_inputs(A__ )
snake_case = sd_pipe(**A__ ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self , A__=0 ) -> str:
snake_case = torch.manual_seed(A__ )
snake_case = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ) -> Any:
snake_case = '''stabilityai/stable-diffusion-2-base'''
snake_case = DDIMScheduler.from_pretrained(A__ , subfolder='''scheduler''' )
snake_case = StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
snake_case = pipe(**A__ ).images
snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
snake_case = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> Tuple:
snake_case = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=A__ )
snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
snake_case = pipe(**A__ ).images
snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
snake_case = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = 0
def callback_fn(A__ , A__ , A__ ) -> None:
snake_case = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
snake_case = latents[0, -3:, -3:, -1]
snake_case = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
snake_case = latents[0, -3:, -3:, -1]
snake_case = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case = False
snake_case = '''stabilityai/stable-diffusion-2-base'''
snake_case = DDIMScheduler.from_pretrained(A__ , subfolder='''scheduler''' )
snake_case = StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
snake_case = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
pipe(**A__ , callback=A__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase ( self ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case = '''stabilityai/stable-diffusion-2-base'''
snake_case = DDIMScheduler.from_pretrained(A__ , subfolder='''scheduler''' )
snake_case = StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
snake_case = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case = self.get_inputs()
snake_case = pipe(**A__ )
snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 44 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
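# Illustrative behaviour note (added): sources that differ only in comments hash
# identically, because "#..." spans are stripped before hashing. For example,
# these two calls return the same digest:
#
#     _hash_python_lines(["x = 1  # set x", "print(x)"])
#     _hash_python_lines(["x = 1  ", "print(x)"])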
# get importable module names and hash for caching
_lowercase = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = '''convbert'''
def __init__( self : str , lowerCAmelCase__ : Dict=3_0522 , lowerCAmelCase__ : Union[str, Any]=768 , lowerCAmelCase__ : Tuple=12 , lowerCAmelCase__ : Dict=12 , lowerCAmelCase__ : Optional[Any]=3072 , lowerCAmelCase__ : Optional[int]="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Dict=1E-12 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : str=0 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Dict=768 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : Union[str, Any]=9 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : int=None , **lowerCAmelCase__ : str , ):
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Tuple = vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: str = num_attention_heads
SCREAMING_SNAKE_CASE_: int = intermediate_size
SCREAMING_SNAKE_CASE_: Dict = hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_: List[str] = type_vocab_size
SCREAMING_SNAKE_CASE_: str = initializer_range
SCREAMING_SNAKE_CASE_: Dict = layer_norm_eps
SCREAMING_SNAKE_CASE_: Optional[Any] = embedding_size
SCREAMING_SNAKE_CASE_: int = head_ratio
SCREAMING_SNAKE_CASE_: Tuple = conv_kernel_size
SCREAMING_SNAKE_CASE_: Optional[int] = num_groups
SCREAMING_SNAKE_CASE_: Union[str, Any] = classifier_dropout
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self : str):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_: Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 671 |
import re
def split_input(str_):
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


# NOTE: the four wrapper names below are reconstructed from what each body does;
# only split_input, to_simple_case and to_complex_case are referenced internally.
def to_pascal_case(text):
    return to_simple_case(text)


def to_camel_case(text):
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __A ( a ):
"""simple docstring"""
A_ = 'levit'
def __init__( self , _lowerCamelCase=2_2_4 , _lowerCamelCase=3 , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1_6 , _lowerCamelCase=[1_2_8, 2_5_6, 3_8_4] , _lowerCamelCase=[4, 8, 1_2] , _lowerCamelCase=[4, 4, 4] , _lowerCamelCase=[1_6, 1_6, 1_6] , _lowerCamelCase=0 , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=0.0_2 , **_lowerCamelCase , )-> List[Any]:
super().__init__(**_lowerCamelCase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = kernel_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = hidden_sizes
lowercase__ = num_attention_heads
lowercase__ = depths
lowercase__ = key_dim
lowercase__ = drop_path_rate
lowercase__ = patch_size
lowercase__ = attention_ratio
lowercase__ = mlp_ratio
lowercase__ = initializer_range
lowercase__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __A ( a ):
"""simple docstring"""
A_ = version.parse('1.11' )
@property
def snake_case_( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case_( self )-> float:
return 1e-4
| 707 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __A ( a ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , )-> Tuple:
super().__init__(
features=_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase , streaming=_lowerCamelCase , num_proc=_lowerCamelCase , **_lowerCamelCase , )
lowercase__ = Generator(
cache_dir=_lowerCamelCase , features=_lowerCamelCase , generator=_lowerCamelCase , gen_kwargs=_lowerCamelCase , **_lowerCamelCase , )
def snake_case_( self )-> Optional[Any]:
# Build iterable dataset
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_lowerCamelCase , download_mode=_lowerCamelCase , verification_mode=_lowerCamelCase , base_path=_lowerCamelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split='''train''' , verification_mode=_lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
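# Hedged usage note (not from this file): users normally reach this reader
# through the public API, which wires `generator` and `gen_kwargs` into the
# same builder used above:
#
#     from datasets import Dataset
#
#     def gen():
#         yield {"id": 0}
#         yield {"id": 1}
#
#     ds = Dataset.from_generator(gen)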
| 318 | 0 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : List[Any] = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
A : Tuple = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
A : Optional[Any] = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 128 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Tuple = ''''''
_A : Dict = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[DatasetInfo] = None , lowerCAmelCase__ : Optional[str] = None , **lowerCAmelCase__ : str , ):
"""simple docstring"""
super().__init__(self , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = repo_info
__SCREAMING_SNAKE_CASE : Dict = token
__SCREAMING_SNAKE_CASE : Dict = None
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
if self.dir_cache is None:
__SCREAMING_SNAKE_CASE : Optional[int] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__SCREAMING_SNAKE_CASE : str = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {"""name""": str(lowerCAmelCase__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str = "rb" , **lowerCAmelCase__ : Optional[Any] , ):
"""simple docstring"""
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
__SCREAMING_SNAKE_CASE : Tuple = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Any ):
"""simple docstring"""
self._get_dirs()
__SCREAMING_SNAKE_CASE : Dict = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]=False , **lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
self._get_dirs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = PurePosixPath(path.strip("""/""" ) )
__SCREAMING_SNAKE_CASE : Dict = {}
for p, f in self.dir_cache.items():
__SCREAMING_SNAKE_CASE : str = PurePosixPath(p.strip("""/""" ) )
__SCREAMING_SNAKE_CASE : Dict = p.parent
if root == path:
__SCREAMING_SNAKE_CASE : int = f
__SCREAMING_SNAKE_CASE : int = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 578 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCAmelCase ( snake_case ):
'''simple docstring'''
lowerCAmelCase__ = """efficientformer"""
def __init__( self , a__ = [3, 2, 6, 4] , a__ = [48, 96, 2_24, 4_48] , a__ = [True, True, True, True] , a__ = 4_48 , a__ = 32 , a__ = 4 , a__ = 7 , a__ = 5 , a__ = 8 , a__ = 4 , a__ = 0.0 , a__ = 16 , a__ = 3 , a__ = 3 , a__ = 3 , a__ = 2 , a__ = 1 , a__ = 0.0 , a__ = 1 , a__ = True , a__ = True , a__ = 1E-5 , a__ = "gelu" , a__ = 0.02 , a__ = 1E-12 , a__ = 2_24 , a__ = 1E-05 , **a__ , ):
super().__init__(**a__ )
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_expansion_ratio
_UpperCAmelCase = downsamples
_UpperCAmelCase = dim
_UpperCAmelCase = key_dim
_UpperCAmelCase = attention_ratio
_UpperCAmelCase = resolution
_UpperCAmelCase = pool_size
_UpperCAmelCase = downsample_patch_size
_UpperCAmelCase = downsample_stride
_UpperCAmelCase = downsample_pad
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = num_metaad_blocks
_UpperCAmelCase = distillation
_UpperCAmelCase = use_layer_scale
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = image_size
_UpperCAmelCase = batch_norm_eps
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 494 | 0 |
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
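
# --- Hedged demo (not part of the original test file) ---
# Running the module directly prints one mapping as a smoke check, then runs the tests.
if __name__ == "__main__":
    print(get_test_info.to_json(get_test_to_tester_mapping(bert_test_file)))
    unittest.main()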
| 74 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex holding its id, its neighbors and the edge weights to them."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (vertices are addressed with 1-based indices):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex: O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the frontier: O((V + E) log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-style examples (see the demo function below)."""
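
# --- Hedged demo (not part of the original file) ---
# Builds a small illustrative graph and prints the minimum spanning tree from
# both variants; the vertex count and edge weights are assumptions for the demo.
def _demo_minimum_spanning_tree():
    graph = [Vertex(n) for n in range(5)]
    connect(graph, 1, 2, 15)
    connect(graph, 1, 3, 12)
    connect(graph, 2, 4, 13)
    connect(graph, 2, 5, 5)
    connect(graph, 3, 4, 6)
    connect(graph, 4, 5, 9)
    print(prim(graph, graph[0]))  # MST edges from the O(V^2) variant
    print(list(prim_heap(graph, graph[0])))  # same tree from the heap variant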
if __name__ == "__main__":
import doctest
doctest.testmod() | 553 | 0 |
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run a candidate program in a separate process and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). This is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
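
# --- Hedged usage sketch (not part of the original module) ---
# Checking a tiny program through the harness; the task/completion ids are
# illustrative, and the signal-based timeout makes this POSIX-only.
if __name__ == "__main__":
    sample_program = "assert 1 + 1 == 2\nprint('ok')"
    print(check_correctness(sample_program, timeout=3.0, task_id="demo/0", completion_id=0))
| 706 |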
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) ) | 649 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
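
# --- Hedged usage sketch (not part of the original script) ---
# The function can also be called programmatically; the prompt and directory
# below are illustrative, and the call hits the LAION knn service over the network:
#   retrieve("photo of a cat", "./real_reg/cat", num_class_images=200)
| 30 |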
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
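
# --- Hedged usage sketch (not part of the original module) ---
# Skeleton of the predictor-corrector sampling loop; `model` is an assumed score
# network and the shapes/step counts below are illustrative only:
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = model(sample, t).sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample
| 249 | 0 |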
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 290 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Input three values — force, area and distance — with exactly one of them set
    to 0, and solve the Casimir equation F = (ℏ·c·π²·A) / (240·d⁴) for the
    missing quantity.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
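
    # --- Hedged worked example (not part of the original file) ---
    # Solving for the force between two plates of area 4 m^2 separated by 1 µm;
    # the inputs are illustrative. The result is roughly 5.2e-3 N.
    print(casimir_force(force=0, area=4.0, distance=1e-6))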
| 290 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
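
# --- Hedged usage sketch (not part of the original test file) ---
# Outside the test suite, the lookup can be queried directly; the model id is illustrative:
#   framework = FeaturesManager.determine_framework("bert-base-cased")  # -> "pt" or "tf"
| 287 |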
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
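
# --- Hedged usage sketch (not part of the original test file) ---
# Preprocessing a video clip outside the test suite; sizes and shapes are illustrative:
#   processor = VivitImageProcessor(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
#   pixel_values = processor(list_of_pil_frames, return_tensors="pt").pixel_values  # (1, T, C, H, W)
| 287 | 1 |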
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
def lowerCAmelCase_ ( self : Any ):
_A = 'cpu' # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type='v_prediction' )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
_A = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 'A painting of a squirrel eating a burger'
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_A = output.images
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=_UpperCAmelCase , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self : Dict ):
_A = 'cpu' # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type='v_prediction' )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
_A = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 'A painting of a squirrel eating a burger'
_A = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_A = output.images
assert image.shape[0] == 2
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
_A = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : int ):
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type='v_prediction' )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_A = unet.half()
_A = text_encoder.half()
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
_A = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 'A painting of a squirrel eating a burger'
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , ).images
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
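
# Minimal end-to-end sketch of the x4 upscaler outside the test harness.
# Assumes network access for the checkpoint and the sample image used by the
# integration tests above; the device choice and output filename are arbitrary.
def run_upscale_demo():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    # each side comes back upscaled 4x, matching the shape assertions above
    upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
    upscaled.save("upscaled_cat.png")  # arbitrary output path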
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''spiece.model'''}
a = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
a = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
a = '''▁'''
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
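
# Minimal usage sketch of the special-token helpers above. Assumes network
# access to fetch the "albert-base-v2" files listed in the pretrained map;
# the sample sentences are arbitrary.
def demo_albert_special_tokens():
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))
    pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    # layout is [CLS] a ... [SEP] b ... [SEP]; segment ids are 0 for the first
    # sequence (plus its special tokens) and 1 for the second
    assert len(pair) == len(type_ids) == len(ids_a) + len(ids_b) + 3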
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """Convert a whole-number decimal value to its "0x"-prefixed hexadecimal string."""
    # only accept whole numbers (note: the type() check also rejects bools)
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
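
# Quick sanity check against Python's builtin hex(); the sample values are
# arbitrary picks covering positive, negative and multi-digit cases.
def demo_decimal_to_hexadecimal():
    for sample in (5, 15, 37, 255, -256, 4096):
        assert decimal_to_hexadecimal(sample) == hex(sample), sample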
if __name__ == "__main__":
import doctest
doctest.testmod()
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
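
# Minimal usage sketch: standing in for the real DownloadManager inside a
# dataset test. The dataset name is a made-up placeholder and the call assumes
# local dummy data exists under datasets/<name>/dummy/...
def demo_mock_download_manager(tmp_path):
    dl_manager = MockDownloadManager(
        dataset_name="my_dataset",  # hypothetical dataset script name
        config=None,
        version=Version("1.0.0"),
        cache_dir=tmp_path,
        use_local_dummy_data=True,
    )
    # a dataset script calls this exactly like it would the real manager
    return dl_manager.download_and_extract("https://example.com/data.csv")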
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
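
# Minimal sketch of reproducing the forward pass the integration test above
# pins down, outside the test harness. Assumes network access to download
# "flaubert/flaubert_base_cased".
def demo_flaubert_forward():
    model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
    model.eval()
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        hidden_states = model(input_ids)[0]
    # one hidden vector of size 768 per input position
    assert hidden_states.shape == (1, input_ids.shape[1], 768)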
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 1 |
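
# Minimal unconditional sampling sketch with the checkpoint the slow test
# uses; assumes network access, and the step count is an arbitrary choice.
def demo_ldm_uncond():
    ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    ldm.to(torch_device)
    generator = torch.manual_seed(0)
    image = ldm(generator=generator, num_inference_steps=50, output_type="numpy").images[0]
    # the celebahq checkpoint produces 256x256 RGB samples
    assert image.shape == (256, 256, 3)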
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
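
# Stripped-down sketch of the lazy-import pattern used above: attribute access
# triggers the real submodule import. This is a simplification for
# illustration, not the actual transformers _LazyModule implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the submodule only now, on first access
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)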
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__A = sys.version_info >= (3, 10)
def lowercase_ ( _lowerCamelCase: Tuple=None , _lowerCamelCase: List[str]=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_lowerCamelCase )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
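
# Minimal usage sketch of what HfArgumentParser does with a dataclass like
# BasicExample above: each field becomes a --flag. This mirrors the behaviour
# the tests below assert; the argument values are arbitrary.
def demo_hf_argparser():
    parser = HfArgumentParser(BasicExample)
    (example,) = parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "True"],
        look_for_args_file=False,
    )
    assert example.foo == 1 and example.flag is True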
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

            args = BasicExample(**args_dict_for_json)
            self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

            args = BasicExample(**args_dict_for_yaml)
            self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser) | 707 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 366 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])

CUSTOM_DPR_READER_DOCSTRING = R"""
_lowercase = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts ), F'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["""input_ids"""]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
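# Illustrative usage sketch (not part of the original module): how the reader tokenizer
# and `decode_best_spans` are typically paired with a `DPRReader` model. The checkpoint
# name and the question/title/text values below are assumptions for the example.
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)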
| 5 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        """simple docstring"""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        """simple docstring"""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        """simple docstring"""
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606 | 0 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    # n is pentagonal iff (1 + sqrt(1 + 24 * n)) / 6 is a positive integer
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
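# Quick sanity check (illustrative, not part of the original file): the first pentagonal
# numbers are 1, 5, 12, 22, 35, and is_pentagonal should accept exactly those.
#     assert [n for n in range(1, 36) if is_pentagonal(n)] == [1, 5, 12, 22, 35]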
if __name__ == "__main__":
print(f"""{solution() = }""")
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        '''simple docstring'''
        download_parser = parser.add_parser("""env""")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            """--accelerate-config_file""", default=None, help="""The accelerate config file to use for the default values in the launching script.""", )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        '''simple docstring'''
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""") is not None:
            import safetensors

            safetensors_version = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''

        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                """\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f'''\t{accelerate_config}'''
            )

        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = """not installed"""
        tf_cuda_available = """NA"""
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("""GPU"""))

        flax_version = jax_version = jaxlib_version = """not installed"""
        jax_backend = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
    @staticmethod
    def format_dict(d: dict) -> str:
        '''simple docstring'''
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
| 667 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError('''Invalid Input''')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step to the right
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
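# Illustrative check of the sliding window (not part of the original file):
#     assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24  # window [3, 1, 0, 20]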
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase_ = [randint(-10_00, 10_00) for i in range(1_00)]
lowerCAmelCase_ = randint(0, 1_10)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""") | 39 |
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
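# Illustrative trace (not part of the original file) for "(5 + ((4 * 2) * (2 + 3)))":
# digits are pushed (RULE 1), operators are pushed (RULE 2), and each ")" reduces one
# sub-expression (RULE 4): (2 + 3) -> 5, (4 * 2) -> 8, 8 * 5 -> 40, 5 + 40 -> 45.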
if __name__ == "__main__":
_A = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 431 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
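# Illustrative check (not part of the original file): 585 reads the same forwards and
# backwards in base 10 and in base 2 (0b1001001001), so solution() counts it.
#     assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])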
if __name__ == "__main__":
print(solution(int(str(input().strip())))) | 621 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
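# Illustrative invocation (the script filename and paths are placeholders, not from the
# original file):
#     python convert_mbart_checkpoint.py model.pt ./mbart-hf --hf_config facebook/mbart-large-cc25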
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """simple docstring"""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
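# Illustrative note (not part of the original module): a pipeline driving this block can
# rebalance the two streams at inference time, e.g.
#     model.mix_ratio = 0.7              # weight the first transformer's residual more heavily
#     model.condition_lengths = [77, 257]  # e.g. text tokens first, then image tokens
# with `encoder_hidden_states` built by concatenating both condition embeddings.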
| 127 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_for_image_classification(self):
        '''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
| 232 | 0 |
"""simple docstring"""
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('''n_element should be a positive number''')
        raise my_error

    # the list is seeded with 1; i, j, k index the next candidates for the 2, 3, 5 multiples
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5) )
        index += 1
    return hamming_list
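# Illustrative check (not part of the original file):
#     assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]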
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
lowerCAmelCase_ = hamming(int(n))
print('-----------------------------------------------------')
print(F'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 122 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
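# Illustrative check (not part of the original file): 100 V at 0 degrees with 5 A at
# 0 degrees gives a purely real apparent power of 500 VA.
#     assert abs(apparent_power(100, 5, 0, 0) - 500) < 1e-9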
if __name__ == "__main__":
import doctest
doctest.testmod()
| 122 | 1 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution()) | 67 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return F"""{self.data}"""

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
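# Illustrative usage (not part of the original file):
#     linked_list = LinkedList()
#     for value in (1, 2, 3):
#         linked_list.insert(value)   # appends at the tail
#     assert str(linked_list) == "1 2 3" and 2 in linked_list
#     linked_list.delete_value(2)
#     assert str(linked_list) == "1 3"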
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 621 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""", out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    '''simple docstring'''
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, """rb""" ) as f:
        data = pickle.load(f)
    state_dict = data["""model"""]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255

    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="""pt""")

    outputs = model(**inputs)

    print("""Logits:""", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("""Looks ok!""")

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing model and image processor to the hub...""")
        model.push_to_hub(F'nielsr/{model_name}')
        image_processor.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pkl file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
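
# Hypothetical usage sketch (an addition for illustration, not part of the original
# script): once converted and saved, the checkpoint loads via the standard
# 🤗 Transformers API; the folder path below is an assumed placeholder.
#
#     from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
#
#     model = MaskFormerForInstanceSegmentation.from_pretrained("path/to/converted/maskformer-swin-tiny-ade")
#     image_processor = MaskFormerImageProcessor.from_pretrained("path/to/converted/maskformer-swin-tiny-ade")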
| 706 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


# modeled after fairseq's Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' on each line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # plain dict files carry no metadata header; symbol entries start at line 0
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """(1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    """
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
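
# Worked example added for clarity (not part of the original script): continuation
# BPE pieces lose their trailing "@@", word-final pieces gain "</w>", and special
# tokens are restored without the suffix:
#
#     rewrite_dict_keys({"<s>": 0, "le@@": 5, "tt@@": 6, "er": 7})
#     # => {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0}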
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
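
# Hypothetical usage sketch (an addition for illustration, not part of the original
# script): after conversion, the dump folder loads via the standard 🤗 Transformers
# API; the folder path below is an assumed placeholder.
#
#     from transformers import BioGptForCausalLM, BioGptTokenizer
#
#     tokenizer = BioGptTokenizer.from_pretrained("path/to/converted/biogpt")
#     model = BioGptForCausalLM.from_pretrained("path/to/converted/biogpt")
#     inputs = tokenizer("COVID-19 is", return_tensors="pt")
#     outputs = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(outputs[0], skip_special_tokens=True))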
| 53 | 0 |