import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)

        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
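# --- Added usage note (not part of the original test file) ---
# A minimal sketch of how these tests are typically run; the file path is an
# assumption based on the usual transformers repo layout.
#
#   pytest tests/models/openai/test_modeling_openai.py -k "test_openai_gpt_model"
#   RUN_SLOW=1 pytest tests/models/openai/test_modeling_openai.py  # also runs the @slow tests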
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
SCREAMING_SNAKE_CASE_ = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
SCREAMING_SNAKE_CASE_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
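# --- Added example (not part of the original script) ---
# A minimal sketch of reloading the saved passages and index and querying them,
# following the hints in the comments inside main() (passages_path/index_path are
# the paths computed there). The question-encoder checkpoint is an assumption;
# any DPR question encoder matching the context encoder above should work.
#
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
# dataset = load_from_disk(passages_path)  # reload the passages
# dataset.load_faiss_index("embeddings", index_path)  # reload the index
#
# q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
# q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
# question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
# scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)
# print(retrieved["text"])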
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
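# --- Added note (not part of the original module) ---
# With the _LazyModule registration above, importing this package is cheap:
# the heavy torch-backed module is only imported on first attribute access.
# A minimal sketch, assuming torch is installed and the class is re-exported
# at the top level (the exact import path depends on the installed version):
#
# from transformers import VanConfig, VanModel  # resolved lazily via _LazyModule
# model = VanModel(VanConfig())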
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
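# --- Added usage sketch (not part of the original module) ---
# Minimal example of the fast tokenizer above; the checkpoint name comes from
# the PRETRAINED_VOCAB_FILES_MAP keys, everything else is the standard API.
#
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# enc = tokenizer("Hello world", "Second sentence")
# # build_inputs_with_special_tokens yields: [CLS] hello world [SEP] second sentence [SEP]
# print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))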
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
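# --- Added usage sketch (not part of the original test file) ---
# The integration tests above boil down to this pattern; "CPUExecutionProvider"
# works without a GPU, and the model/revision names are taken from the tests.
# init_image would be a PIL.Image resized to (768, 512) as above.
#
# pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
# )
# out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75, output_type="np")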
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
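# --- Added example invocation (not part of the original script) ---
# A summarization counterpart of the MT usage above; paths are placeholders,
# and every flag shown is defined in run_generate() above.
#
#   python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#       --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 16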
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining,
        BartForConditionalGeneration,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        CamembertForMaskedLM,
        CTRLLMHeadModel,
        DistilBertForMaskedLM,
        DistilBertForQuestionAnswering,
        DPRContextEncoder,
        DPRQuestionEncoder,
        DPRReader,
        ElectraForPreTraining,
        FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
        LayoutLMForMaskedLM,
        LxmertForPreTraining,
        LxmertVisualFeatureEncoder,
        OpenAIGPTLMHeadModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        T5ForConditionalGeneration,
        TransfoXLLMHeadModel,
        XLMRobertaForMaskedLM,
        XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
a= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
a= parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
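# --- Added example invocation (not part of the original script) ---
# Converts a single bert checkpoint from the hub and compares TF vs PyTorch
# outputs; the script filename is an assumption, the flags are defined above.
#
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dump \
#       --model_type bert --pytorch_checkpoint_path bert-base-cased --compare_with_pt_model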
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
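# --- Added usage sketch (not part of the original module) ---
# Minimal example of constructing the dataset above; the tokenizer checkpoint is
# an assumption, and data_dir must contain the GLUE tsv files for the task.
#
# from transformers import AutoTokenizer
#
# data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
# print(len(train_dataset), train_dataset.get_labels())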
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma^2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
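
# Quick sanity checks (values rounded): the density of N(mu, sigma^2) peaks at
# x = mu with value 1 / sqrt(2 * pi * sigma**2), so for the standard normal
# gaussian(0) ~= 0.3989, and gaussian(2, mu=2, sigma=2) ~= 0.1995.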
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Build a circuit with the given register sizes, measure qubit 0 into
    classical bit 0 and return the measurement counts over 1000 shots."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
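
# Note: since no gates are applied before the measurement, the qubit stays in
# |0>, so the returned counts are expected to look like {'0': 1000} every run.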
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
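
# The tester above follows the standard transformers pattern: prepare_config_and_inputs
# builds one small synthetic batch, and each create_and_check_* method instantiates a
# task head, runs a forward pass on torch_device, and asserts only output shapes, which
# keeps the suite fast and independent of real checkpoints.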
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 455 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
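
    # Note: Pegasus shifts sentencepiece ids up by `offset` to make room for the
    # reserved tokens (<pad>, </s> and the <mask_*>/<unk_*> block), which is why
    # the underlying sentencepiece unk id 2 maps to tokenizer.offset + 2 == 105 above.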
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case :Union[str, Any] = {"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __snake_case
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        src_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(src_text).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 455 | 1 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each keyword to the list of start offsets where it occurs."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
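
# Example (hedged sketch): Automaton(["what", "hat", "ver", "er"]) built over the
# text "whatever, err ... wherever" reports every keyword's start offsets in one
# left-to-right pass; for instance "ver" is found at the tail of both "whatever"
# and "wherever", thanks to the fail transitions linking overlapping matches.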
if __name__ == "__main__":
import doctest
doctest.testmod() | 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
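
# Note: nltk's corpus_gleu aggregates the matching and total n-gram counts over
# the whole corpus before taking min(precision, recall), so the corpus score is
# not the average of per-sentence GLEU scores (see the description above).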
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
} | 26 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
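
    # The loop above is a bare-bones DDPM sampling loop: iterating t from T-1
    # down to 0, each scheduler.step() applies one reverse-diffusion update to
    # the fixed dummy sample, and the seeded generator keeps the trajectory
    # deterministic so the final sum/mean can be pinned down by the asserts.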
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
 | 19 | INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
} | 544 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 192 | """simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
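
    # reverse() is the classic three-pointer iterative reversal: O(n) time and
    # O(1) extra space, leaving self.head pointing at what used to be the tail.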
def test_singly_linked_list() -> None:
    """
    >>> test_singly_linked_list()
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """
    This section of the test uses varying data types for input.
    >>> test_singly_linked_list_2()
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 192 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 213 | """simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2 as require_detectrona,  # alias kept for the decorator name used throughout this file
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9_948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9_948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9_948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.4_251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.4_251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.4_251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.4_251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0_819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9_998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9_998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9_998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf(self):
pass
| 599 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 325 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
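        # Worked example for the size recurrence above (an added note, not part of
        # the original test): with image_size=64 and the stage-0 defaults of this
        # tester (patch size 7, stride 4, padding 2), one stage gives
        # floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16, so the first stage
        # maps a 64x64 input to a 16x16 feature map.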
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''Cvt does not output attentions''' )
    def test_attention_outputs(self):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 325 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 238 | 1 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Calculate the product of all the digits in the string of digits s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
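# Sanity check (an added note, not part of the original solution):
# str_eval("13978") is 1 * 3 * 9 * 7 * 8 = 1512.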
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
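# For the 1000-digit number N above, solution() returns 23514624896, the
# documented answer to Project Euler problem 8 for thirteen adjacent digits.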
if __name__ == "__main__":
print(F'{solution() = }')
| 708 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 353 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 21 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 103 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal visits root node, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    """Post-order traversal visits left subtree, right subtree, root node."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """In-order traversal visits left subtree, root node, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    """Recursive function for calculating the height of the binary tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
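# Worked example (an added note, assuming the tree reconstructed in make_tree
# above: 1 at the root, 2 and 3 as its children, 4 and 5 under 2):
#   preorder(tree)  -> [1, 2, 4, 5, 3]
#   inorder(tree)   -> [4, 2, 5, 1, 3]
#   postorder(tree) -> [4, 5, 2, 3, 1]
#   height(tree)    -> 3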
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Level-order traversal visits nodes of the tree level by level."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at a particular level, left to right."""
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)
    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at a particular level, right to left."""
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)
    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: returns the values level by level, alternating direction."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
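# With the same sample tree, zigzag() alternates direction per level and
# returns [[1], [3, 2], [4, 5]] (an added note, assuming the reconstructed make_tree).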
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 712 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
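# Example (an added note): generate_all_combinations(4, 2) produces all
# 2-element combinations of {1, 2, 3, 4}:
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]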
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 24 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
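# Worked example (an added note): downscale_height_and_width(768, 768, scale_factor=8)
# computes 768 // 8**2 = 12 with no remainder, so it returns (12 * 8, 12 * 8) = (96, 96),
# the latent resolution used for the 768x768 image in the example docstring above.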
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
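            # Added note: classifier-free guidance forms
            # eps = eps_uncond + s * (eps_text - eps_uncond); with the default
            # guidance_scale s = 4.0 the conditional direction is amplified
            # fourfold relative to the unconditional prediction.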
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
# post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 14 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag bit added to distinguish arrow keys from their letter codes
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
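# Added note: on POSIX terminals an arrow key arrives as the ANSI escape
# sequence ESC '[' <letter>, e.g. up-arrow is bytes 27, 91, 65 ('A').
# get_character reads ESC (KEYMAP["esc"] == 27), then '[' (KEYMAP["mod_int"]
# == 91), then shifts the final letter into the flagged range by adding
# ARROW_KEY_FLAG, so callers see a single synthetic key code per arrow.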
| 112 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 212 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
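        # Worked example (an added note): with the defaults above,
        # (30 // 2) ** 2 = 225 patches, so seq_length is 226 including [CLS].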
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase ( self ):
A__ = ViTModelTester(self )
A__ = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=37 )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase,nn.Linear ) )
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1],__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__( )->int:
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ):
A__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__lowerCamelCase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**__lowerCamelCase )
# verify the logits
A__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape,__lowerCamelCase )
A__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__lowerCamelCase,atol=1E-4 ) )
@slow
def UpperCamelCase ( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which interpolates the pre-trained position embeddings so that the model can
        # be used on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher-resolution images.
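        # Numerically: facebook/dino-vits8 uses 8x8 patches, so a 480x480 input yields
        # (480 // 8) ** 2 = 3600 patch tokens plus the [CLS] token -- exactly the
        # (1, 3601, 384) hidden-state shape asserted below.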
A__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__lowerCamelCase )
A__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''',size=480 )
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A__ = model(__lowerCamelCase,interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
A__ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape,__lowerCamelCase )
A__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3],__lowerCamelCase,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self ):
        A__ = ViTModel.from_pretrained('''facebook/dino-vits8''',torch_dtype=torch.float16,device_map='''auto''' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ = model(__lowerCamelCase )
| 212 | 1 |
"""simple docstring"""
def lucas_lehmer_test( p : int ) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
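# Worked example of the recurrence above: for p = 7, m = 2**7 - 1 = 127 and the
# sequence runs 4 -> 14 -> 67 -> 42 -> 111 -> 0 (mod 127); the final s == 0
# correctly reports that 127 is a Mersenne prime.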
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 7 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
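# Sanity check for rotate() (illustrative, not part of the original script):
# rotate(numpy.array([1, 0]), 60) returns approximately [0.5, 0.8660254], i.e.
# VECTOR_2 above -- a 60 degree counter-clockwise turn, which is what produces
# the outward bumps of each Koch iteration.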
| 14 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""image_processor""", """tokenizer"""]
UpperCamelCase__ = """BlipImageProcessor"""
UpperCamelCase__ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_) -> List[str]:
'''simple docstring'''
lowercase__: Optional[int] = False
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
lowercase__: Any = self.image_processor
def __call__( self , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text.")
# Get only text
if images is None:
lowercase__: Union[str, Any] = self.tokenizer
lowercase__: str = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
return text_encoding
# add pixel_values
lowercase__: int = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_)
if text is not None:
lowercase__: Optional[Any] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
lowercase__: Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_)
return encoding_image_processor
def __lowercase ( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __lowercase ( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def __lowercase ( self) -> int:
'''simple docstring'''
lowercase__: List[str] = self.tokenizer.model_input_names
lowercase__: Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
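# Minimal usage sketch (hypothetical checkpoint name and image path, not part of
# this module): the processor routes `text` to the tokenizer and `images` to the
# image processor and merges both into a single BatchEncoding.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # `inputs` now contains pixel_values alongside input_ids / attention_mask.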
| 120 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=18 , UpperCAmelCase_=30 , UpperCAmelCase_=400 , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=[0.5, 0.5, 0.5] , UpperCAmelCase_=[0.5, 0.5, 0.5] , UpperCAmelCase_=False , ) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = size if size is not None else {"height": 20, "width": 20}
lowercase__: Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase__: Optional[int] = parent
lowercase__: Optional[int] = batch_size
lowercase__: Union[str, Any] = num_channels
lowercase__: Any = image_size
lowercase__: List[Any] = min_resolution
lowercase__: Optional[Any] = max_resolution
lowercase__: Dict = do_resize
lowercase__: str = size
lowercase__: str = do_center_crop
lowercase__: List[Any] = crop_size
lowercase__: List[Any] = do_normalize
lowercase__: Any = image_mean
lowercase__: Any = image_std
lowercase__: List[str] = do_reduce_labels
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A( ):
"""simple docstring"""
lowercase__: Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase__: List[str] = Image.open(dataset[0]["file"] )
lowercase__: str = Image.open(dataset[1]["file"] )
return image, map
def A( ):
"""simple docstring"""
lowercase__: Optional[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase__: Any = Image.open(ds[0]["file"] )
lowercase__: List[Any] = Image.open(ds[1]["file"] )
lowercase__: Optional[Any] = Image.open(ds[2]["file"] )
lowercase__: Tuple = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _a ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ = BeitImageProcessor if is_vision_available() else None
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
lowercase__: str = BeitImageProcessingTester(self)
@property
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def __lowercase ( self) -> Tuple:
'''simple docstring'''
lowercase__: Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_)
lowercase__: Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase_)
self.assertEqual(image_processor.size , {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> int:
'''simple docstring'''
lowercase__: int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowercase__: int = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__: Optional[int] = image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowercase ( self) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowercase__: Any = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__: Any = image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowercase__: List[str] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__: Tuple = image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
lowercase__: Optional[Any] = []
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
lowercase__: Any = image_processing(image_inputs[0] , maps[0] , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
lowercase__: Tuple = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
lowercase__ , lowercase__: List[str] = prepare_semantic_single_inputs()
lowercase__: int = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
lowercase__ , lowercase__: List[Any] = prepare_semantic_batch_inputs()
lowercase__: List[str] = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowercase__ , lowercase__: List[Any] = prepare_semantic_single_inputs()
lowercase__: List[str] = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 150)
lowercase__: Dict = True
lowercase__: Dict = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
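        # Note on do_reduce_labels: ADE20k maps use 0 for "background" and 1..150 for
        # the classes; with reduce_labels enabled the processor subtracts 1 from every
        # label and remaps the old 0 to the ignore index 255, which is why the maximum
        # jumps from 150 to 255 in the assertions above.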
| 120 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __lowercase ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=1 , UpperCamelCase=False , **UpperCamelCase ) -> List[str]:
super().__init__(**UpperCamelCase )
__a = vocab_size
__a = d_embed
__a = d_proj
__a = cutoffs + [vocab_size]
__a = [0] + self.cutoffs
__a = div_val
__a = self.cutoffs[0]
__a = len(self.cutoffs ) - 1
__a = self.shortlist_size + self.n_clusters
__a = keep_order
__a = []
__a = []
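        # Worked example (hypothetical numbers): vocab_size=10 with cutoffs=[4, 8]
        # gives self.cutoffs=[4, 8, 10], cutoff_ends=[0, 4, 8, 10], a shortlist of
        # 4 head tokens and n_clusters=2 tail clusters, i.e. head_size = 4 + 2 = 6.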
def UpperCamelCase__ ( self , UpperCamelCase ) -> Dict:
if self.n_clusters > 0:
__a = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=UpperCamelCase , name='cluster_weight' )
__a = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=UpperCamelCase , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__a = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_projs_._{i}" , )
self.out_projs.append(UpperCamelCase )
else:
self.out_projs.append(UpperCamelCase )
__a = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_layers_._{i}_._weight" , )
__a = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a = self.d_embed // (self.div_val**i)
__a = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_projs_._{i}" )
self.out_projs.append(UpperCamelCase )
__a = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_layers_._{i}_._weight" , )
__a = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=UpperCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase )
@staticmethod
def UpperCamelCase__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> Dict:
__a = x
if proj is not None:
__a = tf.einsum('ibd,ed->ibe' , UpperCamelCase , UpperCamelCase )
return tf.einsum('ibd,nd->ibn' , UpperCamelCase , UpperCamelCase ) + b
@staticmethod
def UpperCamelCase__ ( UpperCamelCase , UpperCamelCase ) -> List[str]:
__a = shape_list(UpperCamelCase )
__a = tf.range(lp_size[0] , dtype=target.dtype )
__a = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=True , UpperCamelCase=False ) -> Union[str, Any]:
__a = 0
if self.n_clusters == 0:
__a = self._logit(UpperCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__a = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase , logits=UpperCamelCase )
__a = tf.nn.log_softmax(UpperCamelCase , axis=-1 )
else:
__a = shape_list(UpperCamelCase )
__a = []
__a = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__a = (target >= l_idx) & (target < r_idx)
__a = tf.where(UpperCamelCase )
__a = tf.boolean_mask(UpperCamelCase , UpperCamelCase ) - l_idx
if self.div_val == 1:
__a = self.out_layers[0][0][l_idx:r_idx]
__a = self.out_layers[0][1][l_idx:r_idx]
else:
__a = self.out_layers[i][0]
__a = self.out_layers[i][1]
if i == 0:
__a = tf.concat([cur_W, self.cluster_weight] , 0 )
__a = tf.concat([cur_b, self.cluster_bias] , 0 )
__a = self._logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , self.out_projs[0] )
__a = tf.nn.log_softmax(UpperCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__a = tf.boolean_mask(UpperCamelCase , UpperCamelCase )
__a = self._gather_logprob(UpperCamelCase , UpperCamelCase )
else:
__a = self._logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , self.out_projs[i] )
__a = tf.nn.log_softmax(UpperCamelCase )
__a = self.cutoffs[0] + i - 1 # No probability for the head cluster
__a = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase )
if target is not None:
__a = tf.boolean_mask(UpperCamelCase , UpperCamelCase )
__a = tf.boolean_mask(UpperCamelCase , UpperCamelCase )
__a = self._gather_logprob(UpperCamelCase , UpperCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase , -cur_logprob , shape_list(UpperCamelCase ) )
__a = tf.concat(UpperCamelCase , axis=-1 )
if target is not None:
if return_mean:
__a = tf.reduce_mean(UpperCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(UpperCamelCase , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 539 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__lowerCAmelCase = parser.parse_args()
if args.model_type == "bert":
__lowerCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase = '''bert'''
else:
raise ValueError("""args.model_type should be \"bert\".""")
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCAmelCase = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowerCAmelCase = 0
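    # Layer mapping encoded by the loop below: teacher layers 0, 2, 4, 7, 9 and 11
    # become student layers 0..5, so a 12-layer BERT teacher is compressed into a
    # 6-layer student by keeping roughly every second layer.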
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowerCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowerCAmelCase = state_dict['''cls.predictions.decoder.weight''']
__lowerCAmelCase = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowerCAmelCase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self :int , lowercase :Optional[NestedDataStructureLike[PathLike]] = None , lowercase :Optional[NamedSplit] = None , lowercase :Optional[Features] = None , lowercase :str = None , lowercase :bool = False , lowercase :bool = False , lowercase :Optional[int] = None , **lowercase :str , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = path_or_paths
SCREAMING_SNAKE_CASE = split if split or isinstance(lowercase , lowercase ) else '''train'''
SCREAMING_SNAKE_CASE = features
SCREAMING_SNAKE_CASE = cache_dir
SCREAMING_SNAKE_CASE = keep_in_memory
SCREAMING_SNAKE_CASE = streaming
SCREAMING_SNAKE_CASE = num_proc
SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def snake_case__ ( self :Optional[int] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self :int , lowercase :Optional[Features] = None , lowercase :str = None , lowercase :bool = False , lowercase :bool = False , lowercase :Optional[int] = None , **lowercase :List[Any] , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = features
SCREAMING_SNAKE_CASE = cache_dir
SCREAMING_SNAKE_CASE = keep_in_memory
SCREAMING_SNAKE_CASE = streaming
SCREAMING_SNAKE_CASE = num_proc
SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def snake_case__ ( self :Any ) -> Union[Dataset, IterableDataset]:
"""simple docstring"""
        pass
| 201 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : torch.FloatTensor
class A__ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowercase = 3 , lowercase = 3 , lowercase = ("DownEncoderBlock2D",) , lowercase = ("UpDecoderBlock2D",) , lowercase = (64,) , lowercase = 1 , lowercase = "silu" , lowercase = 3 , lowercase = 32 , lowercase = 256 , lowercase = 32 , lowercase = None , lowercase = 0.1_82_15 , lowercase = "group" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
a__ : List[Any] = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
a__ : int = vq_embed_dim if vq_embed_dim is not None else latent_channels
        a__ : Any = nn.Conv2d(lowercase , lowercase , 1)
a__ : List[str] = VectorQuantizer(lowercase , lowercase , beta=0.25 , remap=lowercase , sane_index_shape=lowercase)
        a__ : Any = nn.Conv2d(lowercase , lowercase , 1)
# pass init params to Decoder
a__ : List[str] = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , norm_type=lowercase , )
@apply_forward_hook
def __lowercase ( self , lowercase , lowercase = True) -> VQEncoderOutput:
'''simple docstring'''
a__ : List[Any] = self.encoder(lowercase)
a__ : Dict = self.quant_conv(lowercase)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase)
@apply_forward_hook
def __lowercase ( self , lowercase , lowercase = False , lowercase = True) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if not force_not_quantize:
a__ , a__ , a__ : Dict = self.quantize(lowercase)
else:
a__ : List[Any] = h
a__ : List[Any] = self.post_quant_conv(lowercase)
a__ : Tuple = self.decoder(lowercase , quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase)
def __lowercase ( self , lowercase , lowercase = True) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
a__ : int = sample
a__ : Any = self.encode(lowercase).latents
a__ : Optional[Any] = self.decode(lowercase).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase)
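# Shape walk-through (illustrative defaults, not part of the original module): with
# block_out_channels=(64,) there is a single encoder block and no downsampling, so a
# (1, 3, 32, 32) image is encoded to a 3-channel latent of the same spatial size,
# snapped to the nearest of the 256 codebook vectors by `quantize`, and decoded back
# to a (1, 3, 32, 32) reconstruction.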
| 392 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = tempfile.mkdtemp()
a__ : Tuple = 5
# Realm tok
a__ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a__ : Any = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(lowercase , exist_ok=lowercase)
a__ : int = os.path.join(lowercase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
a__ : List[str] = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(lowercase , exist_ok=lowercase)
def __lowercase ( self) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : int = RealmConfig(num_block_records=self.num_block_records)
return config
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Tuple = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=lowercase , )
return block_records
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[Any] = self.get_config()
a__ : Tuple = self.get_dummy_retriever()
a__ : Tuple = retriever.tokenizer
a__ : str = np.array([0, 3] , dtype='long')
a__ : Optional[int] = tokenizer(['Test question']).input_ids
a__ : List[str] = tokenizer(
['the fourth'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : str = config.reader_seq_len
a__ , a__ , a__ , a__ : int = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
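        # The two decoded sequences above make the reader input layout explicit:
        # [CLS] <question tokens> [SEP] <retrieved block tokens> [SEP], with each
        # concatenation capped at config.reader_seq_len.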
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : List[str] = self.get_config()
a__ : Union[str, Any] = self.get_dummy_retriever()
a__ : List[Any] = retriever.tokenizer
a__ : Any = np.array([0, 3, 5] , dtype='long')
a__ : Tuple = tokenizer(['Test question']).input_ids
a__ : Optional[Any] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : Dict = config.reader_seq_len
a__ , a__ , a__ , a__ : Dict = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual([False, True, True] , lowercase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowercase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
a__ : Optional[int] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , b'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
a__ : str = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
a__ : str = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , b'This is the first record')
| 392 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase__( snake_case_ ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """depth_multiplier""" ) )
class lowerCamelCase__:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=0.25 , __UpperCAmelCase=8 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=3_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu6" , __UpperCAmelCase=1_2_8_0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=1_0 , __UpperCAmelCase=None , ):
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = depth_multiplier
__lowercase = depth_divisible_by
__lowercase = min_depth
__lowercase = expand_ratio
__lowercase = tf_padding
__lowercase = output_stride
__lowercase = first_layer_is_expansion
__lowercase = finegrained_output
__lowercase = hidden_act
__lowercase = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self ):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = MobileNetVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MobileNetVaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MobileNetVaForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Dict = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase : List[Any] = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase : str = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Dict = False
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = MobileNetVaModelTester(self )
__lowercase = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(__UpperCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__lowercase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 1_6
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileNetVaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowercase__ ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__UpperCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**__UpperCAmelCase )
# verify the logits
__lowercase = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__lowercase = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
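        # MobileNet ImageNet checkpoints emit 1001 logits: the 1000 ImageNet classes
        # plus an extra "background" class inherited from the original TensorFlow
        # release, hence the (1, 1001) shape checked above.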
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase = model.to(__UpperCAmelCase )
__lowercase = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase = prepare_img()
__lowercase = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**__UpperCAmelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__lowercase = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 566 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( snake_case_ , unittest.TestCase ):
UpperCamelCase : List[str] = DebertaVaTokenizer
UpperCamelCase : Optional[int] = DebertaVaTokenizerFast
UpperCamelCase : Tuple = True
UpperCamelCase : Dict = True
def __magic_name__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = DebertaVaTokenizer(__UpperCAmelCase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = """this is a test"""
__lowercase = """this is a test"""
return input_text, output_text
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """<pad>"""
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(__UpperCAmelCase ) , 3_0_0_0_1 )
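        # The underlying sentencepiece model holds 30_000 pieces (see vocab_size in the
        # next test); get_vocab() additionally exposes the added "[PAD]" token, which is
        # why there are 30_001 keys here.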
def __magic_name__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """ \tHeLLo!how \n Are yoU? """
__lowercase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
__lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
__lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `__lowercase` above holds the expected integration encodings (left verbatim).
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
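# To run just this suite (hypothetical path; adjust to the actual repo layout):
#   python -m pytest tests/models/deberta_v2/test_tokenization_deberta_v2.py -q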
| 566 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        # stub feature extractor: returns an object with empty `pixel_values`,
        # which is enough for the pipeline plumbing exercised in these tests
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
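# Minimal end-user sketch of the pipeline exercised above (assumes a CUDA device,
# network access, and an `init_image` PIL image; checkpoint id taken from the tests):
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)
#   out.images[0].save("fantasy.png")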
| 343 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
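            # e.g. the regex below matches a call that a formatter split across lines:
            #     getattr(
            #         self.config,
            #         "num_attention_heads",
            #     )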
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check whether the attributes defined in `config_class.__init__` are used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 343 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    # field names and default values are recovered from the kwargs dict below;
    # the type annotations are a reasonable reconstruction
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
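    # Example of how this property is consumed (hypothetical values; see
    # `Csv._generate_tables` below for the real call site):
    #   config = CsvConfig(name="my_csv", sep=";")
    #   reader = pd.read_csv("data.csv", iterator=True, **config.pd_read_csv_kwargs)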
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 50 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
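# Usage sketch for the processor under test (the checkpoint id is an assumption,
# not something these tests pin down):
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")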
| 104 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights, graph[i][j] is the weight of edge (i, j)
    :param v: number of vertices
    :return: (dist, v) where dist[u][w] is the shortest distance from vertex u to w
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
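# Non-interactive usage sketch (same functions, fixed inputs instead of input()):
#   INF = float("inf")
#   sample_graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   dist, _ = floyd_warshall(sample_graph, 3)  # prints the expected output above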
| 73 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_ocr = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
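        # With apply_ocr=False the processor skips the OCR step entirely and
        # returns only the resized/normalized pixel_values (no words or boxes).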
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
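
# Minimal standalone sketch (not part of the test suite) of the sampling loop the
# tests above exercise. The zero tensor is a stand-in for a real UNet's noise
# prediction; a real pipeline would call a trained model here.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # stand-in for a noise prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.abs().mean())
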
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a = logging.get_logger(__name__)
a = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : Optional[Any] ):
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
_A = model
_A = kwargs.get('model_save_dir' , _UpperCAmelCase )
_A = kwargs.get('latest_model_name' , _UpperCAmelCase )
def __call__( self : Dict , **_UpperCAmelCase : List[Any] ):
_A = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None ):
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
_A = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : List[Any] ):
_A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_A = self.model_save_dir.joinpath(self.latest_model_name )
_A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_A = self.model_save_dir.joinpath(_UpperCAmelCase )
if src_path.exists():
_A = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] , ):
if os.path.isfile(_UpperCAmelCase ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[Union[bool, str, None]] = None , _UpperCAmelCase : Optional[Union[str, None]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional["ort.SessionOptions"] = None , **_UpperCAmelCase : Union[str, Any] , ):
_A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase ):
_A = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
_A = Path(_UpperCAmelCase )
# load model from hub
else:
# download model
_A = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
_A = Path(_UpperCAmelCase ).parent
_A = Path(_UpperCAmelCase ).name
_A = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
return cls(model=_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : Tuple , ):
_A = None
if len(str(_UpperCAmelCase ).split('@' ) ) == 2:
_A , _A = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
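
# Usage sketch (illustrative only): the repo id below is a placeholder, and the
# input name "sample" is an assumption about the exported graph, not something
# this module defines.
#
#   model = OnnxRuntimeModel.from_pretrained("my-org/my-onnx-model")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
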
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A :
"""simple docstring"""
def __init__( self : int , A_ : Union[str, Any] , A_ : List[str]=13 , A_ : Any=30 , A_ : List[str]=2 , A_ : int=3 , A_ : Any=True , A_ : Optional[Any]=True , A_ : List[str]=32 , A_ : str=2 , A_ : List[str]=4 , A_ : str=37 , A_ : Tuple="gelu" , A_ : str=0.1 , A_ : List[str]=0.1 , A_ : Union[str, Any]=10 , A_ : Optional[Any]=0.02 , A_ : Any=3 , A_ : Union[str, Any]=0.6 , A_ : int=None , ) -> Any:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = mask_ratio
__snake_case = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowercase ( self : Union[str, Any] ) -> str:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowercase ( self : List[Any] ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase ( self : str , A_ : Any , A_ : Any , A_ : Any ) -> int:
__snake_case = TFViTMAEModel(config=snake_case_ )
__snake_case = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : int , A_ : str , A_ : Any , A_ : List[Any] ) -> str:
__snake_case = TFViTMAEForPreTraining(snake_case_ )
__snake_case = model(snake_case_ , training=snake_case_ )
# expected sequence length = num_patches
__snake_case = (self.image_size // self.patch_size) ** 2
__snake_case = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__snake_case = 1
__snake_case = TFViTMAEForPreTraining(snake_case_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(snake_case_ , training=snake_case_ )
__snake_case = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase ( self : Optional[int] ) -> Optional[int]:
__snake_case = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case)) = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _A ( _a , _a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase_ : List[str] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : str = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Dict = False
def lowercase ( self : Any ) -> Optional[int]:
__snake_case = TFViTMAEModelTester(self )
__snake_case = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowercase ( self : Tuple ) -> Union[str, Any]:
pass
def lowercase ( self : Union[str, Any] ) -> Any:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Layer ) )
def lowercase ( self : Optional[Any] ) -> Tuple:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
__snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowercase ( self : Optional[int] ) -> str:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase ( self : List[Any] ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def lowercase ( self : int ) -> str:
# make the mask reproducible
np.random.seed(2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = int((config.image_size // config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
__snake_case = self._prepare_for_class(snake_case_ , snake_case_ )
__snake_case = model(snake_case_ , noise=snake_case_ )
__snake_case = copy.deepcopy(self._prepare_for_class(snake_case_ , snake_case_ ) )
__snake_case = model(**snake_case_ , noise=snake_case_ )
__snake_case = outputs_dict[0].numpy()
__snake_case = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowercase ( self : Any ) -> str:
# make the mask reproducible
np.random.seed(2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = int((config.image_size // config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(A_ : Dict ):
__snake_case = {}
for k, v in inputs_dict.items():
if tf.is_tensor(snake_case_ ):
__snake_case = v.numpy()
else:
__snake_case = np.array(snake_case_ )
return inputs_np_dict
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
__snake_case = self._prepare_for_class(snake_case_ , snake_case_ )
__snake_case = prepare_numpy_arrays(snake_case_ )
__snake_case = model(snake_case_ , noise=snake_case_ )
__snake_case = model(**snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
def lowercase ( self : Any , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] ) -> Any:
# make masks reproducible
np.random.seed(2 )
__snake_case = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__snake_case = tf.constant(snake_case_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__snake_case = tf_noise
super().check_pt_tf_models(snake_case_ , snake_case_ , snake_case_ )
def lowercase ( self : str ) -> int:
# make mask reproducible
np.random.seed(2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(snake_case_ , snake_case_ ),)
if isinstance(snake_case_ , snake_case_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case_ , '''_keras_serializable''' , snake_case_ )
}
__snake_case = int((config.image_size // config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__snake_case = tf.convert_to_tensor(snake_case_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__snake_case = main_layer_class(snake_case_ )
__snake_case = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__snake_case = tf.keras.Model(snake_case_ , outputs=main_layer(snake_case_ ) )
__snake_case = model(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = os.path.join(snake_case_ , '''keras_model.h5''' )
model.save(snake_case_ )
__snake_case = tf.keras.models.load_model(
snake_case_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case_ , tf.keras.Model )
__snake_case = model(snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@slow
def lowercase ( self : str ) -> Any:
# make mask reproducible
np.random.seed(2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = int((config.image_size // config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
__snake_case = self._prepare_for_class(snake_case_ , snake_case_ )
__snake_case = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
__snake_case = outputs.last_hidden_state.numpy()
__snake_case = 0
else:
__snake_case = outputs.logits.numpy()
__snake_case = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
__snake_case = model_class.from_pretrained(snake_case_ )
__snake_case = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
__snake_case = after_outputs['''last_hidden_state'''].numpy()
__snake_case = 0
else:
__snake_case = after_outputs['''logits'''].numpy()
__snake_case = 0
__snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
def lowercase ( self : Dict ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = int((config.image_size // config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__snake_case = model_class(snake_case_ )
__snake_case = self._prepare_for_class(snake_case_ , snake_case_ )
__snake_case = model(snake_case_ , noise=snake_case_ )
__snake_case = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case_ )
__snake_case = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__snake_case = model_class.from_config(model.config )
__snake_case = new_model(snake_case_ ) # Build model
new_model.set_weights(model.get_weights() )
__snake_case = new_model(snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowercase ( self : List[str] ) -> List[Any]:
pass
@slow
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
__snake_case = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : Tuple ) -> int:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[int] ) -> Dict:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__snake_case = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=snake_case_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__snake_case = ViTMAEConfig()
__snake_case = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__snake_case = np.random.uniform(size=(1, num_patches) )
# forward pass
__snake_case = model(**snake_case_ , noise=snake_case_ )
# verify the logits
__snake_case = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , snake_case_ )
__snake_case = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case_ , atol=1E-4 ) | 720 | """simple docstring"""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
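    # Worked example (illustrative grid, not part of the original module): moving
    # only right/down, the cheapest path is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7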
"""simple docstring"""
from functools import reduce
__snake_case = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the series n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Dict:
if tokenize_kwargs is None:
lowercase__ : List[Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
lowercase__ : Dict = truncation
lowercase__ : Any = tokenize_kwargs
lowercase__ : List[str] = {}
if return_tensors is not None:
lowercase__ : str = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCAmelCase__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Dict[str, GenericTensor]:
lowercase__ : Union[str, Any] = self.framework
lowercase__ : Optional[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
return model_inputs
def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
lowercase__ : str = self.model(**lowerCamelCase__ )
return model_outputs
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=False ) -> Union[str, Any]:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ ) | 128 | 0 |
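
# Usage sketch (illustrative checkpoint name):
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")
#   # features[0] is a per-token list of hidden-state vectors from the base model.
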
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal equivalent."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
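    # Quick checks (illustrative): "1111" pads to "001111" -> "17"; "101010101" -> "525".
    assert bin_to_octal("1111") == "17"
    assert bin_to_octal("101010101") == "525"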
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
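
# Example invocation (all paths are placeholders):
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h
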
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Compute the product u(u-1)(u-2)...(u-p+1) used in the forward-difference series."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
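
# Newton's forward-difference interpolation evaluated above:
#   f(x) ~= y0 + u*dy0 + u(u-1)/2! * d2y0 + ... ,  with u = (x - x0) / h;
# ucal(u, i) supplies the u(u-1)...(u-i+1) factor for the i-th term.
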
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCamelCase = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
_lowerCamelCase = '''▁'''
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Union[str, Any]="</s>" , _lowerCamelCase :List[Any]="<unk>" , _lowerCamelCase :Union[str, Any]="<pad>" , _lowerCamelCase :int=1_0_0 , _lowerCamelCase :Union[str, Any]=None , _lowerCamelCase :Optional[Dict[str, Any]] = None , _lowerCamelCase :int=True , **_lowerCamelCase :List[Any] , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(_lowerCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__SCREAMING_SNAKE_CASE : Optional[int] = len(set(filter(lambda _lowerCamelCase : bool('''extra_id''' in str(_lowerCamelCase ) ) , _lowerCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = legacy
__SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , extra_ids=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowerCamelCase , **_lowerCamelCase , )
__SCREAMING_SNAKE_CASE : Tuple = vocab_file
__SCREAMING_SNAKE_CASE : List[str] = extra_ids
__SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :int ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__SCREAMING_SNAKE_CASE : Any = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _lowerCamelCase , )
return max_model_length
@property
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
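
# A minimal, self-contained sketch of the sentinel-token id scheme implemented
# above, assuming a vocabulary of `vocab_size` ids whose last slots are reserved
# for "<extra_id_0>", "<extra_id_1>", ... (highest id first). The names and the
# vocab size are illustrative, not part of the tokenizer's API.
def demo_extra_id_to_index(token, vocab_size):
    match = re.match(r"<extra_id_(\d+)>", token)
    return vocab_size - int(match.group(1)) - 1


def demo_index_to_extra_id(index, vocab_size):
    return f"<extra_id_{vocab_size - 1 - index}>"


assert demo_extra_id_to_index("<extra_id_0>", 32100) == 32099
assert demo_index_to_extra_id(32099, 32100) == "<extra_id_0>"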
| 674 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
    def __post_init__(self):
        """simple docstring"""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
UpperCamelCase = field(
default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
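
# Illustrative usage sketch (not part of the original script): HfArgumentParser
# turns the dataclasses above into CLI flags automatically. The argv values
# below are made up for demonstration purposes.
def _demo_parse_args():
    demo_parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    return demo_parser.parse_args_into_dataclasses(args=["--output_dir", "/tmp/tapex_demo"])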
def main():
    '''simple docstring'''
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.')[-1]
                test_extension = data_args.test_file.split('.')[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith('.csv'):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#') for _table_row in _table_text.strip('\n').split('\n')]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas, examples['table_text']))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing'):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on dataset', )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label')
        predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predict_results_tabfact.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                logger.info('***** Predict Results *****')
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
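
# Standalone sketch of the TabFact table flattening performed by
# _convert_table_text_to_pandas above: each table is stored as '#'-separated
# cells, one row per line, with the first line holding the column headers.
# The example table below is invented.
def demo_table_text_to_pandas(table_text: str):
    table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(table_content[1:], columns=table_content[0])


print(demo_table_text_to_pandas("player#team#goals\nmessi#psg#30\nhaaland#city#36"))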
| 639 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    '''simple docstring'''
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')
    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')
    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    '''simple docstring'''
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
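
# Compact, self-contained illustration of the regex renaming scheme above:
# match a legacy checkpoint key, recompute the block index from the captured
# groups, and substitute the new HF-style name. The example key is invented.
import re

demo_pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")


def demo_rename(original_key):
    if demo_pattern.fullmatch(original_key) is None:
        return original_key
    groups = demo_pattern.match(original_key).groups()
    block_index = int(groups[2]) * 2 + int(groups[3])
    new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
    return demo_pattern.sub(new_key, original_key)


assert demo_rename("encoders.0.level_blocks.1.model.2.3.weight") == "encoders.0.level_blocks.1.downsample_block.7.weight"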
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", 'wb').write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", 'w') as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
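
# Hedged sketch of the download-if-missing pattern used by
# convert_openai_checkpoint above; the URL passed in would be a placeholder,
# not a guaranteed checkpoint location.
def demo_fetch(url, out_dir):
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, url.split("/")[-1])
    if not os.path.isfile(out_path):
        r = requests.get(url, allow_redirects=True)
        with open(out_path, "wb") as f:
            f.write(r.content)
    return out_path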
| 639 | 1 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_string))
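
# Quick sanity checks (illustrative): for "aabcdaabc" the prefix table is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest proper prefix that is also a
# suffix has length 4 ("aabc").
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4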
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 34 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
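
# Minimal sketch (with made-up sizes) of the fused-qkv splitting used by
# read_in_q_k_v and convert_state_dict above: timm stores query/key/value as a
# single (3 * hidden_size, hidden_size) matrix that is sliced into the three
# separate projections expected by the HF checkpoint layout.
demo_hidden_size = 4  # illustrative only
demo_in_proj_weight = torch.randn(3 * demo_hidden_size, demo_hidden_size)
demo_query = demo_in_proj_weight[:demo_hidden_size, :]
demo_key = demo_in_proj_weight[demo_hidden_size : demo_hidden_size * 2, :]
demo_value = demo_in_proj_weight[-demo_hidden_size:, :]
assert demo_query.shape == demo_key.shape == demo_value.shape == (demo_hidden_size, demo_hidden_size)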
| 279 | 0 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ['input_values', 'attention_mask']

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs['labels'] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs['input_values'] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs['input_values'] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
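
# Self-contained sketch of the zero-mean / unit-variance normalization used by
# zero_mean_unit_var_norm above: statistics come from the unpadded prefix only,
# and the padded tail is overwritten with the padding value. Data is random.
def demo_normalize(vector, length, padding_value=0.0):
    normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
    normed[length:] = padding_value
    return normed


demo_wave = np.random.randn(10)
demo_normed = demo_normalize(demo_wave, length=8)
assert abs(demo_normed[:8].mean()) < 1e-6 and np.all(demo_normed[8:] == 0.0)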
| 706 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
'''simple docstring'''
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('inf' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
if is_tf_available():
lowerCAmelCase__ = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
    @slow
    def test_generate_tf_function_export_fixed_input_length(self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # The fake model below accepts an extra kwarg ("foo") in its signature: generate() must
        # filter it out before calling the encoder, so the output should be unchanged.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
"""
The Bifid cipher combines a Polybius square with transposition: each letter is
mapped to its (row, column) coordinates in the square, the coordinates are
written out row-wise, and the regrouped digit pairs are mapped back to letters.
"""
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the Bifid-encoded version of `message`."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 5x5 square merges i and j

        # Step 1: write the (row, column) coordinates of each letter in two rows.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Step 2: read the table row-wise and regroup the digits into pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the Bifid-decoded version of `message`."""
        message = message.lower()
        message = message.replace(" ", "")

        # Step 1: write out the coordinate digits of the encoded letters in order.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Step 2: reshape into two rows and read the coordinate pairs column-wise.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
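

if __name__ == "__main__":
    # Usage sketch (round trip); note that "j" is folded into "i" by the 5x5 square.
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)
    print(cipher.decode(encoded))  # -> "testmessage"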
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A few sanity checks on the parsed arguments: the loss weights, the file
    paths and the student/teacher combination must be consistent.
    """
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not asked to overwrite it."
                    " Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
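
# Hedged usage sketch mirroring the distillation project's README (paths and
# model names below are placeholders, not part of this script):
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm \
#       --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_training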
"""simple docstring"""
def A( snake_case_ , snake_case_ ):
"""simple docstring"""
lowercase__: List[str] = [1]
for i in range(2 , snake_case_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
lowercase__: Any = []
lowercase__: str = list(range(snake_case_ ) )
# Find permutation
while factorials:
lowercase__: List[str] = factorials.pop()
lowercase__: List[str] = divmod(snake_case_ , snake_case_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
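
    # Usage sketch: k enumerates permutations in lexicographic order, so the
    # last index (n! - 1) yields the fully reversed sequence.
    print(kth_permutation(23, 4))  # -> [3, 2, 1, 0]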
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=512 , UpperCAmelCase_=16 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=4 , UpperCAmelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = parent
lowercase__: List[str] = batch_size
lowercase__: Dict = seq_length
lowercase__: Dict = is_training
lowercase__: List[str] = use_input_mask
lowercase__: Dict = use_token_type_ids
lowercase__: Optional[Any] = use_labels
lowercase__: str = vocab_size
lowercase__: Optional[int] = hidden_size
lowercase__: List[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: Any = hidden_act
lowercase__: Optional[int] = hidden_dropout_prob
lowercase__: Optional[int] = attention_probs_dropout_prob
lowercase__: Dict = max_position_embeddings
lowercase__: Dict = type_vocab_size
lowercase__: Dict = type_sequence_label_size
lowercase__: List[str] = initializer_range
lowercase__: Tuple = num_labels
lowercase__: int = num_choices
lowercase__: Optional[int] = scope
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__: Union[str, Any] = None
if self.use_input_mask:
lowercase__: Tuple = random_attention_mask([self.batch_size, self.seq_length])
lowercase__: int = None
if self.use_token_type_ids:
lowercase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__: Union[str, Any] = None
lowercase__: List[Any] = None
lowercase__: Tuple = None
if self.use_labels:
lowercase__: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
lowercase__: int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self) -> str:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
lowercase__: List[str] = LlamaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowercase__: Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
lowercase__: int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Tuple:
'''simple docstring'''
lowercase__: Tuple = True
lowercase__: Union[str, Any] = LlamaModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowercase__: Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowercase__: Union[str, Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , )
lowercase__: List[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Tuple:
'''simple docstring'''
lowercase__: Tuple = LlamaForCausalLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowercase__: Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Optional[Any]:
'''simple docstring'''
lowercase__: int = True
lowercase__: List[Any] = True
lowercase__: Any = LlamaForCausalLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# first forward pass
lowercase__: List[Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ , )
lowercase__: Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__: Dict = ids_tensor((self.batch_size, 3) , config.vocab_size)
lowercase__: List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
lowercase__: Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase__: List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
lowercase__: List[str] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
lowercase__: Optional[Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
# select random slice
lowercase__: Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase__: List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
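
    # Informal sketch of what the parametrized cases above exercise: "linear"
    # scaling divides the position index by `factor` before the rotary embedding
    # is computed, so even short inputs see different positions; "dynamic" NTK
    # scaling only rescales the rotary base once the sequence exceeds the
    # original max_position_embeddings, which is why the short-input outputs
    # match in the dynamic case.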
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
lowercase__: Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto")
lowercase__: str = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
lowercase__: int = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
lowercase__: List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto")
lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
# Expected mean on dim = -1
lowercase__: Union[str, Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
lowercase__: Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto")
lowercase__: str = model(torch.tensor(UpperCAmelCase_))
# Expected mean on dim = -1
lowercase__: List[Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13])
# fmt: on
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
lowercase__: Dict = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto")
lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
lowercase__: Any = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa)
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# fmt: off
lowercase__: List[str] = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
lowercase__: List[Any] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
lowercase__: List[str] = "Simply put, the theory of relativity states that "
lowercase__: Dict = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
lowercase__: Tuple = tokenizer.encode(UpperCAmelCase_ , return_tensors="pt")
lowercase__: Tuple = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCAmelCase_)
# greedy generation outputs
lowercase__: List[str] = model.generate(UpperCAmelCase_ , max_new_tokens=64 , top_p=UpperCAmelCase_ , temperature=1 , do_sample=UpperCAmelCase_)
lowercase__: Any = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
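
# Usage note (sketch): importing this package is cheap; the heavy torch-backed
# submodules are only materialized by _LazyModule on first attribute access, e.g.
#
#   from transformers import ReformerModel  # resolves through the lazy module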
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
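
# Hedged usage sketch: this module backs the `accelerate env` CLI subcommand,
#
#   accelerate env [--config_file path/to/config.yaml]
#
# and prints a copy-pasteable environment report for bug reports.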
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE .pt/.ckpt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
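
# Hedged usage sketch (the file names are placeholders, not part of this script):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.ckpt \
#       --dump_path ./converted_vae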
"""
Automatic differentiation with dual numbers:
https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers
Note: this only works for basic functions built from +, -, * and integer powers.
"""
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero coefficients.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with zeros: a missing higher-order
        # term has a zero coefficient.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Polynomial multiplication of the dual (epsilon) coefficients.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position`."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
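
    # Hand-checkable examples: d/dx x**2 at x=2 is 4, and the second
    # derivative of x**2 is the constant 2.
    print(differentiate(lambda x: x**2, 2, 1))  # -> 4
    print(differentiate(lambda x: x**2, 2, 2))  # -> 2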
"""simple docstring"""
from math import factorial
SCREAMING_SNAKE_CASE_ = {str(d): factorial(d) for d in range(10)}
def A__ ( A__ ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCAmelCase ) )
def A__ ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , _UpperCAmelCase ) if sum_of_digit_factorial(_UpperCAmelCase ) == i )
if __name__ == "__main__":
print(f'''{solution() = }''')
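    # Hand-checkable example: 145 = 1! + 4! + 5! = 1 + 24 + 120.
    assert sum_of_digit_factorial(145) == 145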
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of `nums` occurs exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
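
    # Usage sketch: the set-based check keeps large inputs at O(n) on average.
    print(all_unique(list(range(1000))))  # -> True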
"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Optional[int] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : Dict , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = size if size is not None else {'shortest_edge': 2_2_4}
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name='crop_size' )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase = do_convert_rgb
def A ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCamelCase = get_resize_output_image_size(UpperCamelCase__ , size=size['shortest_edge'] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[Any] , ):
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any , ):
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : List[str] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : str , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(UpperCamelCase__ , param_name='size' , default_to_square=UpperCamelCase__ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(UpperCamelCase__ , param_name='crop_size' , default_to_square=UpperCamelCase__ )
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
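# Hedged usage sketch (added, not from the original source): assuming this class is a
# CLIP-style image processor and the obfuscated methods above keep their conventional
# names (`preprocess` being the public entry point), it would be exercised roughly as
# follows; `CLIPImageProcessor` is an assumed name.
#     import numpy as np
#     processor = CLIPImageProcessor()
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, crop_height, crop_width)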
| 701 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCamelCase : List[str] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : bool , UpperCamelCase__ : str = None , UpperCamelCase__ : list = None ):
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
UpperCamelCase = os.path.abspath('examples' )
for item in os.listdir(UpperCamelCase__ ):
if item not in EXCLUDE_EXAMPLES:
UpperCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if os.path.isfile(UpperCamelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCamelCase__ , feature_script=UpperCamelCase__ , tested_section='main()' if parser_only else 'training_function()' , ):
UpperCamelCase = compare_against_test(
os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = '\n'.join(UpperCamelCase__ )
if special_strings is not None:
for string in special_strings:
UpperCamelCase = diff.replace(UpperCamelCase__ , '' )
self.assertEqual(UpperCamelCase__ , '' )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.one_complete_example('complete_nlp_example.py' , UpperCamelCase__ )
self.one_complete_example('complete_nlp_example.py' , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
UpperCamelCase = [
' ' * 1_6 + '{\n\n',
' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 2_0 + '"f1": eval_metric["f1"],\n\n',
' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 2_0 + '"epoch": epoch,\n\n',
' ' * 1_6 + '},\n\n',
' ' * 1_6 + 'step=epoch,\n',
' ' * 1_2,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.one_complete_example('complete_cv_example.py' , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = False
@classmethod
def A ( cls : Union[str, Any] ):
"""simple docstring"""
super().setUpClass()
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
UpperCamelCase = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def A ( cls : List[Any] ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
self.assertNotIn('epoch 0:' , UpperCamelCase__ )
self.assertIn('epoch 1:' , UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
if torch.cuda.is_available():
UpperCamelCase = torch.cuda.device_count()
else:
UpperCamelCase = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , UpperCamelCase__ )
self.assertIn('epoch 1:' , UpperCamelCase__ )
else:
self.assertIn('epoch 0:' , UpperCamelCase__ )
self.assertIn('epoch 1:' , UpperCamelCase__ )
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
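            # The example script prints metric dicts to stdout; grab the last one that
            # contains "accuracy" and parse it back into a Python dict with ast.literal_eval.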
UpperCamelCase = re.findall('({.+})' , UpperCamelCase__ )
UpperCamelCase = [r for r in results if 'accuracy' in r][-1]
UpperCamelCase = ast.literal_eval(UpperCamelCase__ )
self.assertGreaterEqual(results['accuracy'] , 0.7_5 )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
UpperCamelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'tracking' ) ) )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 324 | 0 |
'''simple docstring'''
import functools
def _SCREAMING_SNAKE_CASE ( days : list[int] , costs : list[int] ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_6_6:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
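    # Worked example (added): with days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15]
    # (1-day, 7-day and 30-day passes), the optimum buys a 7-day pass covering days
    # 1-7 plus 1-day passes for days 8 and 20: 7 + 2 + 2 = 11.
    print(_SCREAMING_SNAKE_CASE([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11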
| 107 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = '''efficientformer'''
def __init__( self : Optional[Any] , lowercase__ : List[int] = [3, 2, 6, 4] , lowercase__ : List[int] = [48, 96, 224, 448] , lowercase__ : List[bool] = [True, True, True, True] , lowercase__ : int = 448 , lowercase__ : int = 32 , lowercase__ : int = 4 , lowercase__ : int = 7 , lowercase__ : int = 5 , lowercase__ : int = 8 , lowercase__ : int = 4 , lowercase__ : float = 0.0 , lowercase__ : int = 16 , lowercase__ : int = 3 , lowercase__ : int = 3 , lowercase__ : int = 3 , lowercase__ : int = 2 , lowercase__ : int = 1 , lowercase__ : float = 0.0 , lowercase__ : int = 1 , lowercase__ : bool = True , lowercase__ : bool = True , lowercase__ : float = 1e-5 , lowercase__ : str = "gelu" , lowercase__ : float = 0.0_2 , lowercase__ : float = 1e-12 , lowercase__ : int = 224 , lowercase__ : float = 1e-05 , **lowercase__ : Any , ) ->None:
'''simple docstring'''
super().__init__(**lowercase__ )
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : List[str] = hidden_sizes
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[Any] = num_attention_heads
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : Any = patch_size
_UpperCamelCase : int = num_channels
_UpperCamelCase : int = depths
_UpperCamelCase : Union[str, Any] = mlp_expansion_ratio
_UpperCamelCase : Union[str, Any] = downsamples
_UpperCamelCase : Optional[Any] = dim
_UpperCamelCase : Tuple = key_dim
_UpperCamelCase : Tuple = attention_ratio
_UpperCamelCase : Dict = resolution
_UpperCamelCase : Any = pool_size
_UpperCamelCase : List[Any] = downsample_patch_size
_UpperCamelCase : str = downsample_stride
_UpperCamelCase : int = downsample_pad
_UpperCamelCase : Any = drop_path_rate
_UpperCamelCase : List[str] = num_metaad_blocks
_UpperCamelCase : List[Any] = distillation
_UpperCamelCase : Optional[Any] = use_layer_scale
_UpperCamelCase : Union[str, Any] = layer_scale_init_value
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Union[str, Any] = batch_norm_eps
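# Hedged instantiation sketch (added): the defaults above correspond to the
# EfficientFormer-L1 architecture; the class name is obfuscated here, so
# `EfficientFormerConfig` below is an assumption.
#     config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#     print(config.num_hidden_layers, config.hidden_act)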
| 435 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase__ = list[tuple[int, int]]
UpperCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : Node | None , ):
_UpperCAmelCase = pos_x
_UpperCAmelCase = pos_y
_UpperCAmelCase = (pos_y, pos_x)
_UpperCAmelCase = goal_x
_UpperCAmelCase = goal_y
_UpperCAmelCase = g_cost
_UpperCAmelCase = parent
_UpperCAmelCase = self.calculate_heuristic()
def lowerCAmelCase_ ( self : List[Any] ):
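        # Manhattan distance heuristic: |x - goal_x| + |y - goal_y| on the grid.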
_UpperCAmelCase = abs(self.pos_x - self.goal_x )
_UpperCAmelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Union[str, Any] , __lowerCAmelCase : Dict ):
return self.f_cost < other.f_cost
class a :
def __init__( self : List[str] , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[int, int] ):
_UpperCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase )
_UpperCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , __lowerCAmelCase )
_UpperCAmelCase = [self.start]
_UpperCAmelCase = []
_UpperCAmelCase = False
def lowerCAmelCase_ ( self : List[str] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_UpperCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_UpperCAmelCase = True
return self.retrace_path(__lowerCAmelCase )
self.closed_nodes.append(__lowerCAmelCase )
_UpperCAmelCase = self.get_successors(__lowerCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
_UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCAmelCase )
else:
self.open_nodes.append(__lowerCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Node ):
_UpperCAmelCase = []
for action in delta:
_UpperCAmelCase = parent.pos_x + action[1]
_UpperCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) )
return successors
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Node | None ):
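        # Walk the parent pointers back from `node` to the start, then reverse so
        # the returned path runs start -> goal.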
_UpperCAmelCase = node
_UpperCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_UpperCAmelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
UpperCAmelCase__ = (0, 0)
UpperCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
UpperCAmelCase__ = GreedyBestFirst(init, goal)
UpperCAmelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2  # mark the found path on the grid
for elem in grid:
print(elem)
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 275 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'timm_backbone'
def __init__( self : Any , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[str] , ):
super().__init__(**lowerCamelCase__ )
a__ : Any = backbone
a__ : Any = num_channels
a__ : Union[str, Any] = features_only
a__ : List[str] = use_pretrained_backbone
a__ : Optional[Any] = True
a__ : Optional[int] = out_indices if out_indices is not None else (-1,)
| 37 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = XLMProphetNetTokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Optional[int] = True
def __snake_case ( self :Optional[int] ) ->List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : List[str] = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self :int ) ->Any:
lowercase : Dict = """[PAD]"""
lowercase : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __snake_case ( self :Any ) ->Union[str, Any]:
lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 1_012 )
def __snake_case ( self :int ) ->int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
lowercase : Optional[Any] = XLMProphetNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def __snake_case ( self :List[Any] ) ->str:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def __snake_case ( self :int ) ->Optional[Any]:
lowercase : str = """Hello World!"""
lowercase : Optional[Any] = [35_389, 6_672, 49, 2]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def __snake_case ( self :Optional[int] ) ->Union[str, Any]:
# fmt: off
lowercase : Optional[int] = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 264 | 0 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_A = logging.get_logger(__name__)
def list_field (default=None ,metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default ,metadata=metadata )
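# `default_factory` is needed because mutable defaults such as lists may not be
# used directly as dataclass field defaults.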
@dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : List[str] = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
_snake_case : List[int] = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
_snake_case : List[int] = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
_snake_case : bool = field(
default=snake_case__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
_snake_case : bool = field(
default=snake_case__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
_snake_case : bool = field(
default=snake_case__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Benchmark training of model'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Verbose memory tracing'} )
_snake_case : bool = field(
default=snake_case__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
_snake_case : bool = field(
default=snake_case__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Trace memory line by line'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Save result to a CSV file'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Save all print statements in a log file'} )
_snake_case : bool = field(default=snake_case__ , metadata={'help': 'Whether to print environment information'} )
_snake_case : bool = field(
default=snake_case__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
_snake_case : str = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
_snake_case : str = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
_snake_case : str = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
_snake_case : str = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
_snake_case : str = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
_snake_case : str = field(
default=F'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
_snake_case : int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
_snake_case : bool = field(
default=snake_case__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def A ( self : Tuple )-> Dict:
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , A_ , )
def A ( self : Dict )-> Union[str, Any]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def A ( self : str )-> List[str]:
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def A ( self : Union[str, Any] )-> Optional[Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
            return True
| 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    _A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 228 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class lowerCamelCase_ ( lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
a__ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def A ( self ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = "visual_bert"
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_0522 , SCREAMING_SNAKE_CASE_ : Optional[int]=768 , SCREAMING_SNAKE_CASE_ : str=512 , SCREAMING_SNAKE_CASE_ : Any=12 , SCREAMING_SNAKE_CASE_ : Tuple=12 , SCREAMING_SNAKE_CASE_ : List[Any]=3072 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Any=512 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : str=0.0_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=1e-12 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=1 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Tuple=2 , **SCREAMING_SNAKE_CASE_ : Dict , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = hidden_size
lowerCamelCase__ = visual_embedding_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = bypass_transformer
lowerCamelCase__ = special_visual_initialize
| 258 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
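    # Hedged CLI sketch (added): a typical invocation; the script filename and all
    # paths below are placeholders rather than values taken from this repository.
    #     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
    #         --tf_checkpoint_path /path/to/bigbird/model.ckpt \
    #         --big_bird_config_file /path/to/config.json \
    #         --pytorch_dump_path /path/to/pytorch_dump \
    #         --is_trivia_qa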
| 258 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
class lowerCamelCase__ ( lowercase_ ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> Union[str, Any]:
super().__init__()
A = nn.ModuleList(lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = False ,lowerCamelCase_ = True ,) -> Tuple:
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ ,lowerCamelCase_ ,self.nets ) ):
A = controlnet(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,)
# merge samples
if i == 0:
A = down_samples, mid_sample
else:
A = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = False ,lowerCamelCase_ = None ,) -> Optional[Any]:
A = 0
A = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ ,is_main_process=lowerCamelCase_ ,save_function=lowerCamelCase_ ,safe_serialization=lowerCamelCase_ ,variant=lowerCamelCase_ ,)
idx += 1
A = model_path_to_save + f'_{idx}'
@classmethod
def UpperCamelCase__ ( cls ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]:
A = 0
A = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
A = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
A = ControlNetModel.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
A = pretrained_model_path + f'_{idx}'
logger.info(f'{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}.' )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(lowerCamelCase_ )
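# Hedged usage sketch (added): this class mirrors diffusers' MultiControlNetModel
# (the obfuscated name above makes that an assumption). Combining two ControlNets:
#     nets = MultiControlNetModel([controlnet_canny, controlnet_pose])
#     nets.save_pretrained("./multi_controlnet")  # writes ./multi_controlnet, ./multi_controlnet_1, ...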
| 617 |
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase = os.path.join(git_repo_path, """src""", """transformers""")
__UpperCAmelCase = """
{0} = None
"""
__UpperCAmelCase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
__UpperCAmelCase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(lowerCamelCase_ , """tokenizers""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(lowerCamelCase_ , """tensorflow_text""" )
SCREAMING_SNAKE_CASE : List[str] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(lowerCamelCase_ , """sentencepiece_and_tokenizers""" )
SCREAMING_SNAKE_CASE : Tuple = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(lowerCamelCase_ , """sentencepiece_and_tensorflow_text""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(lowerCamelCase_ , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , lowerCamelCase_ )
self.assertIn("""tensorflow_text""" , lowerCamelCase_ )
self.assertIn("""sentencepiece_and_tokenizers""" , lowerCamelCase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(lowerCamelCase_ , """\nCONSTANT = None\n""" )
SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
lowerCamelCase_ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
SCREAMING_SNAKE_CASE : str = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
SCREAMING_SNAKE_CASE : Union[str, Any] = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , lowerCamelCase_ )
| 379 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["image_processor", "tokenizer"]
__A : int = "LayoutLMv3ImageProcessor"
__A : Tuple = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , __A=None , __A=None , **__A ):
"""simple docstring"""
lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
lowerCamelCase : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
def __call__( self , __A , __A = None , __A = None , __A = None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowerCamelCase : List[Any] = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
lowerCamelCase : str = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase : Optional[int] = features["words"]
lowerCamelCase : List[Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
lowerCamelCase : List[str] = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCamelCase : Optional[int] = self.get_overflowing_images(__A , encoded_inputs["overflow_to_sample_mapping"] )
lowerCamelCase : Any = images
return encoded_inputs
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F""" {len(__A )} and {len(__A )}""" )
return images_with_overflow
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
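# Hedged usage sketch (added): assuming the conventional LayoutLMv3 names
# (`LayoutLMv3Processor`, obfuscated above), OCR-enabled processing looks like:
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(image, return_tensors="pt")  # OCR, then tokenize words + boxes
#     print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values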
| 231 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 231 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Dict = "mobilenet_v2"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=2_24 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : Dict=8 , __lowerCAmelCase : Any=8 , __lowerCAmelCase : int=6 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="relu6" , __lowerCAmelCase : int=True , __lowerCAmelCase : int=0.8 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Dict=0.001 , __lowerCAmelCase : Dict=2_55 , **__lowerCAmelCase : Optional[Any] , ) -> Dict:
super().__init__(**__lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A = num_channels
_A = image_size
_A = depth_multiplier
_A = depth_divisible_by
_A = min_depth
_A = expand_ratio
_A = output_stride
_A = first_layer_is_expansion
_A = finegrained_output
_A = hidden_act
_A = tf_padding
_A = classifier_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = semantic_loss_ignore_index
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Union[str, Any] = version.parse("1.11")
@property
def snake_case_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def snake_case_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def snake_case_ ( self : Optional[int] ) -> float:
return 1E-4
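# Hedged sketch (added): the ONNX config above declares a single "pixel_values"
# input with a dynamic batch axis; both class names are obfuscated here, so the
# names below are assumptions based on the transformers ONNX export API.
#     config = MobileNetV2Config()
#     onnx_config = MobileNetV2OnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])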
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
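# MAPPING translates fairseq parameter-name fragments (keys) to their counterparts
# in the Hugging Face SEW implementation (values); "*" stands in for the layer
# index and is filled while walking the state dict below.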
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path, e.g. "encoder.layers.0.attention.k_proj".
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
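# Example invocation (script name and paths are hypothetical placeholders):
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path /path/to/sew.pt \
#         --pytorch_dump_folder_path /path/to/output \
#         --dict_path /path/to/dict.ltr.txt \
#         --is_finetuned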
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 2 | 1 |
"""simple docstring"""
import torch
def main() -> None:
    # Report how many CUDA devices are visible to this process.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 704 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find instances where a non-binary file is opened without an explicit UTF-8 encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find print statements, skipping those inside comments and docstrings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
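# Behaviour of the open(...) regex above (illustrative examples):
#   flagged:  open(path)                    -> no encoding keyword in the call
#   skipped:  open(path, encoding="utf-8")  -> "encoding" appears ahead on the line
#   skipped:  open(path, "rb")              -> binary mode is allowed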
| 422 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a DETA model."""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
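# Minimal usage sketch for the config above:
#
#     config = DetaConfig()              # falls back to the default ResNet backbone
#     assert config.hidden_size == 256   # aliased to d_model
#     d = config.to_dict()               # nested backbone config is serialized too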
| 74 |
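# Newton-Laplace equation: the speed of sound in a fluid is
# c = sqrt(K / rho), with bulk modulus K (Pa) and density rho (kg/m^3).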
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit `n` can be placed at (row, column) without clashing
    with its row, column, or 3x3 subgrid."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of an unfilled cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
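# Backtracking: try digits 1-9 in the first empty cell, recurse, and undo the
# assignment (reset to 0) whenever the recursive call fails to find a solution.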
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 482 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
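# Standard lazy-import layout: submodules are only materialized on first
# attribute access via `_LazyModule`, so importing this package stays cheap.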
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 482 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
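# Determinism matters here: the tests below compare generated pixels against
# hard-coded slices, so CUDA/cuDNN kernels must run deterministically.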
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: 1 = keep, 0 = region to inpaint
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 13 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Create a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
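# Usage sketch for HfArg (the dataclass and field names below are illustrative):
#
#     @dataclass
#     class TrainArgs:
#         lr: float = HfArg(default=3e-4, aliases=["--learning-rate"], help="Peak LR")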
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 9 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward Euler update
    y[k + 1] = y[k] + step_size * ode_func(x, y[k])."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
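# Quick illustration (not a doctest): for dy/dx = y with y(0) = 1 and
# step_size = 0.1, ten Euler steps give y(1) ~= 1.1 ** 10 ~= 2.5937, versus
# the exact value e ~= 2.71828 -- the expected first-order discretization error.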
if __name__ == "__main__":
import doctest
doctest.testmod() | 721 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
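# Example: 220 and 284 form the classic amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220.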
def solution(n: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 298 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 617 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 286 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
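# Minimal usage sketch:
#
#     config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
#     assert config.model_type == "bert-generation"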
| 384 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
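# flatten_yaml_as_dict turns nested YAML into dotted keys, e.g.
# {"model": {"classification": {"name": "mobilevit_v2"}}}
#     -> {"model.classification.name": "mobilevit_v2"}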
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 384 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors, so they need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that gets passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch, so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # This could be the last batch, so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Set the internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
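# Minimal usage sketch (illustrative only, not part of the original module; the
# numbers and the identity `infer` below are assumptions): PipelineIterator unrolls
# a batched DataLoader back into single items, so downstream postprocessing code can
# stay batch-size agnostic.
#
#     from torch.utils.data import DataLoader
#
#     loader = DataLoader(list(range(8)), batch_size=4)
#     it = PipelineIterator(loader, infer=lambda x, **_: x, params={}, loader_batch_size=4)
#     print(list(it))  # eight 0-dim tensors, one per original item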
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # A `None` subiterator means we haven't started the `preprocess` iterator yet, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # This could be the last batch, so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 130 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in at most one position, replacing the
    differing bit with '_'; return False if they differ in more than one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 130 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
@staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # subtract mean and divide by standard deviation (optionally)
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug."""
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""")
        if isinstance(input_features[0], list):
            padded_inputs["""input_features"""] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("""attention_mask""")
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
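# Minimal usage sketch (illustrative only, not part of the original module; the random
# waveform and the 16 kHz rate are assumptions): the extractor turns raw mono audio
# into padded log-mel "input_features" plus an "attention_mask".
#
#     import numpy as np
#     extractor = Speech2TextFeatureExtractor()
#     speech = np.random.randn(16000).astype(np.float32)  # 1 second of fake audio
#     inputs = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
#     print(inputs["input_features"].shape)  # (1, num_frames, 80)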
| 635 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("""t5-base""")
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""")

    if len(unexpected_keys) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""")
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""", padding_side="""left""")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
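# Example invocation (illustrative only; the script name and the output folder are
# assumptions, not taken from the original repository):
#
#   python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small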
| 635 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
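# Illustrative vocab.txt line shapes accepted by the parser above (made-up entries,
# not from the released vocabulary): a comma-separated line maps several surface
# forms to a single token id, while a plain line (or the literal ",") maps one form.
#
#     こんにちわ,こんにちは   -> both spellings share one id
#     <SP>                    -> a single token with its own id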
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def UpperCamelCase__ ( self , _snake_case , _snake_case="\n" ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase = ''.join(_snake_case )
return text
| 4 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
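# Worked example of the mapping above (illustrative): distilling a 12-layer teacher
# into a 3-layer student copies teacher layers [0, 6, 11], i.e. first, middle, last.
#
#     >>> pick_layers_to_copy(n_student=3, n_teacher=12)
#     [0, 6, 11]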
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
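# Example invocation through google-fire (illustrative only; the teacher checkpoint,
# output directory, and layer counts below are placeholders):
#
#   python make_student.py facebook/bart-large-xsum student_dir --e 3 --d 3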
| 4 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
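# Expected entity_vocab.tsv shape (illustrative rows, not from a real release): one
# entity title per line, a tab, then a second field that load_entity_vocab ignores;
# the line number becomes the entity id.
#
#     [PAD]\t0
#     [MASK]\t0
#     Ana Ivanovic\t12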
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 524 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
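# To run just this suite (illustrative path; the exact location depends on the repo
# layout):
#
#   python -m pytest tests/models/prophetnet/test_tokenization_prophetnet.py -q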
| 524 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"""{sampling_rate}"""
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = "f32le" , ):
'''simple docstring'''
lowercase__ = F"""{sampling_rate}"""
lowercase__ = '''1'''
if format_for_conversion == "s16le":
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowercase__ = platform.system()
if system == "Linux":
lowercase__ = '''alsa'''
lowercase__ = '''default'''
elif system == "Darwin":
lowercase__ = '''avfoundation'''
lowercase__ = ''':0'''
elif system == "Windows":
lowercase__ = '''dshow'''
lowercase__ = '''default'''
lowercase__ = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ):
    '''simple docstring'''
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'''raw''': acc[:chunk_len], '''stride''': stride}
                if stream:
                    item['''partial'''] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'''raw''': acc, '''stride''': (_stride_left, 0)}
        if stream:
            item['''partial'''] = False
        yield item
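# Worked example of the chunking above (verifiable by hand): with chunk_len=6
# and stride=(2, 2), feeding b"abcdefgh" then b"ijkl" with stream=False gives
#
#     list(chunk_bytes_iter(iter([b"abcdefgh", b"ijkl"]), 6, stride=(2, 2)))
#     # -> [{"raw": b"abcdef", "stride": (0, 2)},
#     #     {"raw": b"cdefgh", "stride": (2, 2)},
#     #     {"raw": b"efghij", "stride": (2, 2)},
#     #     {"raw": b"ghijkl", "stride": (2, 2)},
#     #     {"raw": b"ijkl", "stride": (2, 0)}]
#
# Consecutive chunks overlap by stride_left + stride_right bytes so a consumer
# can discard the overlapping borders.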
def _ffmpeg_stream( ffmpeg_command , buflen ):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
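# Note on the lazy-import pattern above: the heavy torch imports only happen on
# first attribute access, e.g.
#
#     from transformers.models.vit_msn import ViTMSNModel
#
# resolves through the _LazyModule and imports modeling_vit_msn at that point,
# not at package import time.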
"""simple docstring"""
def greatest_common_divisor( x : int , y : int ):
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x : int , y : int ):
    return (x * y) // greatest_common_divisor(x , y )
def solution( n : int = 20 ):
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
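    # For the default n=20 this prints `solution() = 232792560`, the smallest
    # positive integer evenly divisible by every integer from 1 to 20
    # (Project Euler problem 5).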
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file : str , repo_path : str = "shi-labs/oneformer_demo" ) -> dict:
    '''simple docstring'''
    with open(hf_hub_download(repo_path , class_info_file , repo_type="dataset" ) , "r" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
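# Shape sketch of the returned metadata (illustrative values only; the real
# ids and names come from the downloaded label file):
#
#     {"0": "wall", "1": "building", ..., "thing_ids": [...],
#      "class_names": ["wall", "building", ...]}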
class OneFormerImageProcessorTester ( unittest.TestCase ):
def __init__( self : Optional[Any] , A : Tuple , A : str=7 , A : Union[str, Any]=3 , A : Union[str, Any]=3_0 , A : Dict=4_0_0 , A : List[str]=None , A : str=True , A : Union[str, Any]=True , A : Optional[Any]=[0.5, 0.5, 0.5] , A : str=[0.5, 0.5, 0.5] , A : Optional[Any]=1_0 , A : Optional[int]=False , A : int=2_5_5 , A : List[Any]="shi-labs/oneformer_demo" , A : int="ade20k_panoptic.json" , A : str=1_0 , ):
_UpperCAmelCase : int = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : str = min_resolution
_UpperCAmelCase : List[str] = max_resolution
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : List[Any] = {"shortest_edge": 3_2, "longest_edge": 1_3_3_3} if size is None else size
_UpperCAmelCase : Optional[Any] = do_normalize
_UpperCAmelCase : Optional[int] = image_mean
_UpperCAmelCase : Dict = image_std
_UpperCAmelCase : Any = class_info_file
_UpperCAmelCase : Optional[int] = prepare_metadata(A , A )
_UpperCAmelCase : Any = num_text
_UpperCAmelCase : Dict = repo_path
# for the post_process_functions
_UpperCAmelCase : str = 2
_UpperCAmelCase : Any = 1_0
_UpperCAmelCase : Optional[int] = 1_0
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Optional[Any] = do_reduce_labels
_UpperCAmelCase : Any = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCAmelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processing_tester.prepare_image_processor_dict()
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "size" ) )
self.assertTrue(hasattr(A , "ignore_index" ) )
self.assertTrue(hasattr(A , "class_info_file" ) )
self.assertTrue(hasattr(A , "num_text" ) )
self.assertTrue(hasattr(A , "repo_path" ) )
self.assertTrue(hasattr(A , "metadata" ) )
self.assertTrue(hasattr(A , "do_reduce_labels" ) )
def snake_case_ ( self : List[Any] ):
pass
def snake_case_ ( self : Union[str, Any] ):
# Initialize image_processor
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Dict = self.image_processing_tester.get_expected_values(A , batched=A )
_UpperCAmelCase : Optional[Any] = image_processor(
A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : str ):
# Initialize image_processor
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(A , batched=A )
_UpperCAmelCase : List[str] = image_processor(
A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[int] ):
# Initialize image_processor
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Tuple = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.image_processing_tester.get_expected_values(A , batched=A )
_UpperCAmelCase : Optional[int] = image_processor(
A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[int] , A : Tuple=False , A : Optional[Any]=False , A : int="np" ):
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase : List[str] = self.image_processing_tester.num_labels
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
if with_segmentation_maps:
_UpperCAmelCase : Union[str, Any] = num_labels
if is_instance_map:
_UpperCAmelCase : Optional[int] = list(range(A ) ) * 2
_UpperCAmelCase : Union[str, Any] = dict(enumerate(A ) )
_UpperCAmelCase : Tuple = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase : Optional[int] = [Image.fromarray(A ) for annotation in annotations]
_UpperCAmelCase : int = image_processor(
A , ["semantic"] * len(A ) , A , return_tensors="pt" , instance_id_to_semantic_id=A , pad_and_return_pixel_mask=A , )
return inputs
def snake_case_ ( self : Any ):
pass
def snake_case_ ( self : Dict ):
def common(A : List[Any]=False , A : List[str]=None ):
_UpperCAmelCase : str = self.comm_get_image_processor_inputs(
with_segmentation_maps=A , is_instance_map=A , segmentation_type=A )
_UpperCAmelCase : Optional[int] = inputs["mask_labels"]
_UpperCAmelCase : str = inputs["class_labels"]
_UpperCAmelCase : List[str] = inputs["pixel_values"]
_UpperCAmelCase : Any = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(A , A , A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=A )
common(is_instance_map=A , segmentation_type="pil" )
common(is_instance_map=A , segmentation_type="pil" )
def snake_case_ ( self : int ):
_UpperCAmelCase : Optional[Any] = np.zeros((2_0, 5_0) )
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : List[str] = binary_mask_to_rle(A )
self.assertEqual(len(A ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
    def test_post_process_semantic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer( tf.keras.layers.Layer ):
    def __init__( self, out_channels, kernel_size = 3, stride = 1, groups = 1, activation = "relu", **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', groups=groups, use_bias=False, name='convolution', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call( self, hidden_state) -> tf.Tensor:
        """simple docstring"""
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
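# Shape sketch for the layer above (illustrative numbers): with NHWC input
# (batch, 224, 224, 3), kernel_size=3 and stride=2, ZeroPadding2D adds
# kernel_size // 2 = 1 pixel per side (224 -> 226) and the 'VALID' convolution
# then yields (226 - 3) // 2 + 1 = 112, i.e. output (batch, 112, 112, filters),
# matching a 'SAME' convolution with stride 2.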
class TFRegNetEmbeddings( tf.keras.layers.Layer ):
    def __init__( self, config, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )
    def call( self, pixel_values) -> tf.Tensor:
        """simple docstring"""
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut( tf.keras.layers.Layer ):
    def __init__( self, out_channels, stride = 2, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
    def call( self, inputs, training = False) -> tf.Tensor:
        """simple docstring"""
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer( tf.keras.layers.Layer ):
    def __init__( self, in_channels, reduced_channels, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]
    def call( self, hidden_state) -> tf.Tensor:
        """simple docstring"""
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer( tf.keras.layers.Layer ):
    def __init__( self, config, in_channels, out_channels, stride = 1, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self, hidden_state) -> tf.Tensor:
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer( tf.keras.layers.Layer ):
    def __init__( self, config, in_channels, out_channels, stride = 1, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self, hidden_state) -> tf.Tensor:
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage( tf.keras.layers.Layer ):
    def __init__( self, config, in_channels, out_channels, stride = 2, depth = 2, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=F'''layers.{i+1}''') for i in range(depth - 1)],
        ]
    def call( self, hidden_state) -> tf.Tensor:
        """simple docstring"""
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder( tf.keras.layers.Layer ):
    def __init__( self, config, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'''stages.{i+1}'''))
    def call( self, hidden_state, output_hidden_states = False, return_dict = True) -> TFBaseModelOutputWithNoAttention:
        """simple docstring"""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer( tf.keras.layers.Layer ):
    config_class = RegNetConfig
    def __init__( self, config, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
    @unpack_inputs
    def call( self, pixel_values, output_hidden_states = None, return_dict = None, training = False, ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = """regnet"""
    main_input_name = """pixel_values"""
    @property
    def input_signature( self) -> dict:
        """simple docstring"""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""", REGNET_START_DOCSTRING, )
class TFRegNetModel( TFRegNetPreTrainedModel ):
    def __init__( self, config, *inputs, **kwargs) -> None:
        """simple docstring"""
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call( self, pixel_values, output_hidden_states = None, return_dict = None, training=False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, REGNET_START_DOCSTRING, )
class TFRegNetForImageClassification( TFRegNetPreTrainedModel, TFSequenceClassificationLoss ):
    def __init__( self, config, *inputs, **kwargs) -> None:
        """simple docstring"""
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call( self, pixel_values = None, labels = None, output_hidden_states = None, return_dict = None, training=False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[Any] = logging.get_logger(__name__)
_A : Any = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class a__ ( PretrainedConfig ):
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__( self , vocab_size=50_265 , d_model=1_024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4_096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.0_2 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
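# A minimal usage sketch (the class above corresponds to TrOCRConfig in the
# original source; the obfuscated class name `a__` is kept so the snippet
# matches this file):
#
#     config = a__(d_model=256, decoder_layers=6, decoder_attention_heads=8)
#     assert config.hidden_size == 256  # resolved through attribute_map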
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Optional[int] = logging.get_logger(__name__)
def get_yolos_config( yolos_name : str ) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
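# Example, derived from the branches above: get_yolos_config("yolos_s_200_pre")
# returns a config with hidden_size=384, intermediate_size=1536,
# num_hidden_layers=12, num_attention_heads=6 and num_labels=91.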
def read_in_q_k_v( state_dict : dict , config : YolosConfig , base_model : bool = False ) -> None:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"""encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key( name : str ) -> str:
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
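# Worked examples of the renaming above:
#
#     rename_key("blocks.0.attn.qkv.weight")
#     # -> "encoder.layer.0.attention.self.qkv.weight"
#     rename_key("backbone.cls_token")
#     # -> "vit.embeddings.cls_token"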
def convert_state_dict( orig_state_dict : dict , model : YolosForObjectDetection ) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ) -> None:
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_slice_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_slice_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_slice_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=17 , __SCREAMING_SNAKE_CASE=23 , __SCREAMING_SNAKE_CASE=11 , __SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Optional[int] = seq_length
UpperCamelCase : Any = act_dim
UpperCamelCase : int = state_dim
UpperCamelCase : Dict = hidden_size
UpperCamelCase : Dict = max_length
UpperCamelCase : Tuple = is_training
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config( self ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        """simple docstring"""
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''states''': states,
            '''actions''': actions,
            '''rewards''': rewards,
            '''returns_to_go''': returns_to_go,
            '''timesteps''': timesteps,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                '''states''',
                '''actions''',
                '''rewards''',
                '''returns_to_go''',
                '''timesteps''',
                '''attention_mask''',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase : Dict = 10 # defined by the RL environment, may be normalized
UpperCamelCase : List[Any] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
UpperCamelCase : Dict = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = model.config
torch.manual_seed(0 )
UpperCamelCase : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ) # env.reset()
UpperCamelCase : Dict = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCamelCase : Optional[Any] = state
UpperCamelCase : List[str] = torch.zeros(1 , 0 , config.act_dim , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
UpperCamelCase : Optional[Any] = torch.zeros(1 , 0 , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
UpperCamelCase : List[str] = torch.tensor(0 , device=__SCREAMING_SNAKE_CASE , dtype=torch.long ).reshape(1 , 1 )
for step in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase : Tuple = torch.cat([rewards, torch.zeros(1 , 1 , device=__SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = model(
states=__SCREAMING_SNAKE_CASE , actions=__SCREAMING_SNAKE_CASE , rewards=__SCREAMING_SNAKE_CASE , returns_to_go=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCamelCase : str = action_pred[0, -1]
UpperCamelCase : str = torch.cat([states, state] , dim=1 )
UpperCamelCase : Any = returns_to_go[0, -1] - reward
UpperCamelCase : Optional[int] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCamelCase : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=__SCREAMING_SNAKE_CASE , dtype=torch.long ) * (step + 1)] , dim=1 )
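# --- Illustrative only (not part of the test suite): a hedged sketch of driving the same
# rollout loop with a real environment instead of the random-tensor placeholder. `gym`
# and the "Hopper-v3" id are assumptions here; any environment whose observation and
# action sizes match config.state_dim / config.act_dim would plug in the same way.
#
# import gym
# env = gym.make("Hopper-v3")
# state = torch.as_tensor(env.reset(), dtype=torch.float32, device=torch_device).reshape(1, 1, -1)
# ...and inside the loop, replace the placeholder tuple with:
# obs, reward, done, info = env.step(action.detach().cpu().numpy())
# state = torch.as_tensor(obs, dtype=torch.float32, device=torch_device).reshape(1, 1, -1)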
| 643 |
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
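# --- Illustrative only: a hedged usage sketch of the tokenizer exercised above
# (requires network access to fetch "microsoft/deberta-base"):
#
# from transformers import DebertaTokenizer
# tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
# ids = tok("lower newer")["input_ids"]
# print(tok.decode(ids, skip_special_tokens=True))  # -> "lower newer"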
| 643 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__UpperCAmelCase = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__UpperCAmelCase = '▁'
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] ,A : Dict ,A : Union[str, Any]=True ,A : Optional[Any]=True ,A : Any=False ,A : Optional[Any]="[CLS]" ,A : Union[str, Any]="[SEP]" ,A : Dict="<unk>" ,A : Optional[Any]="[SEP]" ,A : Union[str, Any]="<pad>" ,A : int="[CLS]" ,A : Optional[int]="[MASK]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase__ : List[Any] = (
AddedToken(A ,lstrip=A ,rstrip=A ,normalized=A )
if isinstance(A ,A )
else mask_token
)
UpperCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,pad_token=A ,cls_token=A ,mask_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
UpperCAmelCase__ : List[Any] = do_lower_case
UpperCAmelCase__ : List[Any] = remove_space
UpperCAmelCase__ : str = keep_accents
UpperCAmelCase__ : List[Any] = vocab_file
UpperCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.__dict__.copy()
UpperCAmelCase__ : Any = None
return state
def __setstate__( self : List[Any] ,A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self : Any ,A : List[str] ):
'''simple docstring'''
if self.remove_space:
UpperCAmelCase__ : Any = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ : Union[str, Any] = inputs
UpperCAmelCase__ : Optional[int] = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" )
if not self.keep_accents:
UpperCAmelCase__ : Optional[Any] = unicodedata.normalize("""NFKD""" ,A )
UpperCAmelCase__ : Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
UpperCAmelCase__ : int = outputs.lower()
return outputs
def __lowercase ( self : Union[str, Any] ,A : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.preprocess_text(A )
UpperCAmelCase__ : str = self.sp_model.encode(A ,out_type=A )
UpperCAmelCase__ : List[Any] = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase__ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(A ,"""""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ : Union[str, Any] = cur_pieces[1:]
else:
UpperCAmelCase__ : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def __lowercase ( self : Union[str, Any] ,A : int ):
'''simple docstring'''
return self.sp_model.PieceToId(A )
def __lowercase ( self : Optional[int] ,A : str ):
'''simple docstring'''
return self.sp_model.IdToPiece(A )
def __lowercase ( self : Tuple ,A : str ):
'''simple docstring'''
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Any = """"""
UpperCAmelCase__ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : str = []
else:
current_sub_tokens.append(A )
UpperCAmelCase__ : str = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def __lowercase ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self : str ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,A : str ,A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : List[str] = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"""wb""" ) as fi:
UpperCAmelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
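# --- Illustrative only: a hedged usage sketch of AlbertTokenizer; "albert-base-v2" is
# one of the checkpoints in PRETRAINED_VOCAB_FILES_MAP above, and the pieces shown in
# the comment are indicative, not guaranteed byte-for-byte.
#
# tok = AlbertTokenizer.from_pretrained("albert-base-v2")
# pieces = tok.tokenize("Hello world")              # SentencePiece pieces, e.g. ['▁hello', '▁world']
# ids = tok.convert_tokens_to_ids(pieces)
# print(tok.build_inputs_with_special_tokens(ids))  # [CLS] ids ... [SEP]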
| 65 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
__UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
__UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = fa_score(
A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A )
return {"f1": float(A ) if score.size == 1 else score}
| 65 | 1 |
"""Tokenization classes for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
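# --- Illustrative only: a hedged round-trip sketch for the tokenizer above. It uses the
# checkpoint named in PRETRAINED_VOCAB_FILES_MAP and relies on the load_json/load_spm
# helpers defined below; exact token ids depend on the downloaded vocabulary.
#
# tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
# ids = tok("hello world")["input_ids"]
# print(tok.decode(ids, skip_special_tokens=True))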
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
 | 512 |
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
 | 512 | 1 |
"""ResNet model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
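# --- Illustrative only: a hedged sketch of the two configs above in use; the keyword
# arguments mirror the defaults defined here, and passing the model config straight to
# the ONNX config follows the usual OnnxConfig(config) pattern (an assumption).
#
# config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
# print(config.stage_names)        # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
# onnx_config = ResNetOnnxConfig(config)
# print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}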
| 547 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 638 | 0 |
"""
Project Euler Problem 44: find the pair of pentagonal numbers P(j) and P(k) whose sum
and difference are both pentagonal, and return the difference D = P(k) - P(j).
"""


def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
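# Worked example for is_pentagonal: with n = 22, root = sqrt(1 + 24 * 22) = sqrt(529) = 23
# and (1 + 23) / 6 = 4 exactly, so 22 is pentagonal (it is P(4) = 4 * (3 * 4 - 1) / 2).
# With n = 23, root = sqrt(553) ≈ 23.52, the ratio is not an integer, so 23 is not.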
if __name__ == "__main__":
    print(f"{solution() = }")
 | 707 |
"""Top-down (memoized) edit distance between two words."""
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Return the minimum number of single-character insertions, deletions and
    substitutions needed to turn word1 into word2 (Levenshtein distance)."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
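# Worked example: min_distance_up_bottom("intention", "execution") == 5 -- the classic
# Levenshtein illustration, reachable with five substitutions (i->e, n->x, t->e, e->c, n->u).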
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
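# --- Illustrative only: a hedged example invocation; "swag" and the paths below are
# placeholders, and the valid task names come from `processors.keys()`:
#
#   python run_multiple_choice.py \
#     --task_name swag --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag --output_dir ./out --do_train --do_eval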
| 519 |
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = """
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.format(output_dir=output_dir).split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
 | 691 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def _snake_case ( self: Dict ):
__lowerCamelCase : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(a , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(a , 'num_encoder_blocks' ) )
class A_ :
'''simple docstring'''
def __init__( self: List[str] , a: List[str] , a: Union[str, Any]=13 , a: str=64 , a: Optional[Any]=3 , a: Any=4 , a: Optional[Any]=[2, 2, 2, 2] , a: Optional[Any]=[8, 4, 2, 1] , a: Optional[int]=[16, 32, 64, 128] , a: Tuple=[1, 4, 8, 16] , a: Tuple=[1, 2, 4, 8] , a: Optional[Any]=True , a: Dict=True , a: Any="gelu" , a: List[Any]=0.1 , a: Any=0.1 , a: Dict=0.0_2 , a: str=3 , a: List[Any]=None , ):
__lowerCamelCase : int = parent
__lowerCamelCase : str = batch_size
__lowerCamelCase : str = image_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Dict = num_encoder_blocks
__lowerCamelCase : Tuple = sr_ratios
__lowerCamelCase : Optional[int] = depths
__lowerCamelCase : Tuple = hidden_sizes
__lowerCamelCase : Union[str, Any] = downsampling_rates
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : Dict = is_training
__lowerCamelCase : int = use_labels
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Any = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Optional[int] = num_labels
__lowerCamelCase : Optional[int] = scope
def _snake_case ( self: str ):
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : str = None
if self.use_labels:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self: Union[str, Any] ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _snake_case ( self: int , a: List[str] , a: List[Any] , a: int ):
__lowerCamelCase : Optional[Any] = SegformerModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase : List[Any] = model(a )
__lowerCamelCase : List[str] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _snake_case ( self: List[str] , a: Dict , a: Optional[int] , a: str ):
__lowerCamelCase : Optional[Any] = self.num_labels
__lowerCamelCase : Optional[int] = SegformerForSemanticSegmentation(a )
model.to(a )
model.eval()
__lowerCamelCase : Optional[Any] = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__lowerCamelCase : Union[str, Any] = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _snake_case ( self: Optional[Any] , a: List[Any] , a: Dict , a: Tuple ):
__lowerCamelCase : List[Any] = 1
__lowerCamelCase : List[Any] = SegformerForSemanticSegmentation(config=a )
model.to(a )
model.eval()
__lowerCamelCase : List[str] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(a )
__lowerCamelCase : Any = model(a , labels=a )
self.parent.assertGreater(result.loss , 0.0 )
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = self.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] = config_and_inputs
__lowerCamelCase : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Union[str, Any] = SegformerModelTester(self )
__lowerCamelCase : Dict = SegformerConfigTester(self , config_class=a )
def _snake_case ( self: Optional[int] ):
self.config_tester.run_common_tests()
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _snake_case ( self: Dict ):
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*a )
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*a )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _snake_case ( self: Union[str, Any] ):
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _snake_case ( self: str ):
pass
def _snake_case ( self: Any ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Any = model_class(a )
__lowerCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple = [*signature.parameters.keys()]
__lowerCamelCase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Dict = True
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = False
__lowerCamelCase : Dict = True
__lowerCamelCase : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : int = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : Optional[Any] = outputs.attentions
__lowerCamelCase : Dict = sum(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase : int = True
__lowerCamelCase : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : Any = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase : List[str] = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
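# SegFormer's efficient self-attention downsamples keys/values by sr_ratio, so
# each block attends full-resolution queries against a spatially reduced
# key/value sequence; the shape checks below verify exactly that reduction.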
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__lowerCamelCase : str = (self.model_tester.image_size // 32) ** 2
__lowerCamelCase : Tuple = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__lowerCamelCase : Dict = len(a )
# Check attention is always last and order is fine
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Any = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Dict = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
__lowerCamelCase : Any = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase : Any = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _snake_case ( self: Optional[int] ):
def check_hidden_states_output(a: Optional[Any] , a: List[Any] , a: Tuple ):
__lowerCamelCase : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : str = outputs.hidden_states
__lowerCamelCase : Any = self.model_tester.num_encoder_blocks
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
def _snake_case ( self: Optional[Any] ):
if not self.model_tester.is_training:
return
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(a ):
continue
__lowerCamelCase : Union[str, Any] = model_class(a )
model.to(a )
model.train()
__lowerCamelCase : int = self._prepare_for_class(a , a , return_labels=a )
__lowerCamelCase : Tuple = model(**a ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _snake_case ( self: Any ):
pass
@slow
def _snake_case ( self: Optional[Any] ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = SegformerModel.from_pretrained(a )
self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
__lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self: str ):
# only resize + normalize
__lowerCamelCase : int = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
a )
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Tuple = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : Dict = model(a )
__lowerCamelCase : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : int = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-4 ) )
@slow
def _snake_case ( self: str ):
# only resize + normalize
__lowerCamelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : Any = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(a )
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : List[str] = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Any = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : List[Any] = model(a )
__lowerCamelCase : List[str] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : Dict = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-1 ) )
@slow
def _snake_case ( self: Any ):
# only resize + normalize
__lowerCamelCase : Any = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : int = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
a )
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : Dict = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Any = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(a )
__lowerCamelCase : Any = outputs.logits.detach().cpu()
__lowerCamelCase : str = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(500, 300)] )
__lowerCamelCase : str = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , a )
__lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=a )
__lowerCamelCase : Tuple = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , a )
| 713 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = 42
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any] , a: Optional[Any]=3 , a: Tuple=3 , a: str=("DownEncoderBlock2D",) , a: str=(64,) , a: Optional[int]=2 , a: int=32 , a: str="silu" , a: Optional[Any]=True , ):
super().__init__()
__lowerCamelCase : int = layers_per_block
__lowerCamelCase : List[Any] = torch.nn.Conv2d(
a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : Dict = nn.ModuleList([] )
# down
__lowerCamelCase : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(a ):
__lowerCamelCase : str = output_channel
__lowerCamelCase : Optional[int] = block_out_channels[i]
__lowerCamelCase : Dict = i == len(a ) - 1
__lowerCamelCase : List[Any] = get_down_block(
a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , )
self.down_blocks.append(a )
# mid
__lowerCamelCase : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# out
__lowerCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Optional[Any] = nn.SiLU()
__lowerCamelCase : int = 2 * out_channels if double_z else out_channels
__lowerCamelCase : Tuple = nn.Conv2d(block_out_channels[-1] , a , 3 , padding=1 )
__lowerCamelCase : List[Any] = False
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase : List[str] = x
__lowerCamelCase : Dict = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(module ):
def custom_forward(*inputs ):
return module(*inputs )
return custom_forward
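# Note on the pattern above: torch.utils.checkpoint re-runs each wrapped block
# during the backward pass instead of storing its activations, trading compute
# for memory; the closure adapts a module to the plain-callable interface that
# checkpoint expects.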
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase : List[Any] = down_block(a )
# middle
__lowerCamelCase : Union[str, Any] = self.mid_block(a )
# post-process
__lowerCamelCase : Tuple = self.conv_norm_out(a )
__lowerCamelCase : List[str] = self.conv_act(a )
__lowerCamelCase : int = self.conv_out(a )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , a: List[str]=3 , a: Tuple=3 , a: str=("UpDecoderBlock2D",) , a: Union[str, Any]=(64,) , a: Optional[Any]=2 , a: Optional[Any]=32 , a: str="silu" , a: Union[str, Any]="group" , ):
super().__init__()
__lowerCamelCase : List[Any] = layers_per_block
__lowerCamelCase : Any = nn.Conv2d(
a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = nn.ModuleList([] )
__lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None
# mid
__lowerCamelCase : List[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
__lowerCamelCase : Any = list(reversed(a ) )
__lowerCamelCase : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
__lowerCamelCase : List[Any] = output_channel
__lowerCamelCase : List[str] = reversed_block_out_channels[i]
__lowerCamelCase : Optional[Any] = i == len(a ) - 1
__lowerCamelCase : Optional[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
__lowerCamelCase : List[str] = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase : int = SpatialNorm(block_out_channels[0] , a )
else:
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Union[str, Any] = nn.SiLU()
__lowerCamelCase : List[Any] = nn.Conv2d(block_out_channels[0] , a , 3 , padding=1 )
__lowerCamelCase : List[str] = False
def _snake_case ( self: Optional[int] , a: Tuple , a: List[str]=None ):
__lowerCamelCase : List[str] = z
__lowerCamelCase : Union[str, Any] = self.conv_in(a )
__lowerCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(module ):
def custom_forward(*inputs ):
return module(*inputs )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
__lowerCamelCase : str = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
__lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
__lowerCamelCase : int = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
__lowerCamelCase : int = self.mid_block(a , a )
__lowerCamelCase : List[str] = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = up_block(a , a )
# post-process
if latent_embeds is None:
__lowerCamelCase : Optional[int] = self.conv_norm_out(a )
else:
__lowerCamelCase : Dict = self.conv_norm_out(a , a )
__lowerCamelCase : Any = self.conv_act(a )
__lowerCamelCase : str = self.conv_out(a )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] , a: List[Any] , a: List[Any] , a: List[Any] , a: Tuple=None , a: Tuple="random" , a: List[Any]=False , a: List[str]=True ):
super().__init__()
__lowerCamelCase : Optional[Any] = n_e
__lowerCamelCase : Optional[int] = vq_embed_dim
__lowerCamelCase : Tuple = beta
__lowerCamelCase : List[str] = legacy
__lowerCamelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase : str = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase : Dict = self.used.shape[0]
__lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase : Any = self.re_embed
__lowerCamelCase : Optional[int] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCamelCase : int = n_e
__lowerCamelCase : Optional[Any] = sane_index_shape
def _snake_case ( self: Tuple , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Any = self.used.to(a )
__lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase : Dict = match.argmax(-1 )
__lowerCamelCase : List[Any] = match.sum(2 ) < 1
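# Indices with no match in `used` are unknown codes; depending on
# `unknown_index` they are remapped either to a random known index or to a
# dedicated extra index.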
if self.unknown_index == "random":
__lowerCamelCase : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase : str = self.unknown_index
return new.reshape(a )
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : List[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Union[str, Any] = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase : Optional[Any] = 0 # simply set to zero
__lowerCamelCase : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
def _snake_case ( self: int , a: List[str] ):
# reshape z -> (batch, height, width, channel) and flatten
__lowerCamelCase : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
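# torch.cdist already returns the pairwise Euclidean distances, so the
# expansion above is never materialised explicitly; argmin then picks the
# nearest codebook entry for each flattened latent vector.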
__lowerCamelCase : int = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 )
__lowerCamelCase : str = self.embedding(a ).view(z.shape )
__lowerCamelCase : str = None
__lowerCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
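# VQ-VAE loss split: the detached-z_q term moves codebook vectors toward the
# encoder outputs, while the detached-z term commits the encoder to its chosen
# code; `legacy` only changes which of the two terms `beta` scales.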
# preserve gradients
__lowerCamelCase : int = z + (z_q - z).detach()
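# Straight-through estimator: the forward pass uses the quantized z_q, but
# detach() routes the backward gradient through z as if quantization were the
# identity.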
# reshape back to match original input shape
__lowerCamelCase : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase : Optional[Any] = self.remap_to_used(a )
__lowerCamelCase : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self: Tuple , a: Optional[int] , a: Any ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__lowerCamelCase : Any = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase : Any = self.unmap_to_all(a )
__lowerCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase : str = self.embedding(a )
if shape is not None:
__lowerCamelCase : str = z_q.view(a )
# reshape back to match original input shape
__lowerCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: str , a: Dict , a: Any=False ):
__lowerCamelCase : Tuple = parameters
__lowerCamelCase , __lowerCamelCase : Any = torch.chunk(a , 2 , dim=1 )
__lowerCamelCase : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
__lowerCamelCase : int = deterministic
__lowerCamelCase : Dict = torch.exp(0.5 * self.logvar )
__lowerCamelCase : str = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase : Optional[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _snake_case ( self: Union[str, Any] , a: Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
__lowerCamelCase : Union[str, Any] = randn_tensor(
self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype )
__lowerCamelCase : str = self.mean + self.std * sample
return x
def _snake_case ( self: List[str] , a: Union[str, Any]=None ):
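# Closed-form KL between diagonal Gaussians; against N(0, I) this reduces to
# 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2) over the non-batch dimensions.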
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _snake_case ( self: Optional[Any] , a: str , a: Any=[1, 2, 3] ):
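# Per-element Gaussian negative log-likelihood,
# 0.5 * (log(2*pi) + log sigma^2 + (x - mu)^2 / sigma^2), summed over `dims`.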
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a )
def _snake_case ( self: Optional[int] ):
return self.mean
| 230 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'mvp'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=50267 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=800 , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = classifier_dropout
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = use_prompt
lowerCamelCase_ = prompt_length
lowerCamelCase_ = prompt_mid_dim
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , lowercase_ ):
lowerCamelCase_ = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
| 42 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_)
| 512 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def A_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__A : Optional[int] = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : x[0] / x[1] , reverse=__SCREAMING_SNAKE_CASE )
__A , __A : Optional[Any] = [i[0] for i in r], [i[1] for i in r]
__A : str = list(accumulate(__SCREAMING_SNAKE_CASE ) )
__A : Any = bisect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
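# Sketch of the greedy behaviour on hypothetical inputs: values [60, 100, 120]
# with weights [10, 20, 30] and capacity 50 yield 240.0 -- items 1 and 2 are
# taken whole (value 160) plus 2/3 of item 3 (value 80).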
if __name__ == "__main__":
import doctest
doctest.testmod()
| 499 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
A__ : Union[str, Any] =logging.get_logger(__name__)
A__ : List[str] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
A__ : str ={
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
A__ : List[Any] ={
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def A_ ( ) -> Tuple:
"""simple docstring"""
__A : Optional[int] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__A : List[Any] = bs[:]
__A : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
__A : int = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
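# The mapping gives every possible byte a printable unicode stand-in, so
# byte-level BPE can cover arbitrary text without whitespace or control
# characters ever appearing inside vocabulary entries.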
def A_ ( __SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
__A : str = set()
__A : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : Optional[Any] = char
return pairs
class __A ( _SCREAMING_SNAKE_CASE ):
lowerCamelCase =VOCAB_FILES_NAMES
lowerCamelCase =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase =['''input_ids''', '''attention_mask''']
def __init__( self : Any , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Dict="replace" , lowerCamelCase : int="<s>" , lowerCamelCase : str="</s>" , lowerCamelCase : List[str]="</s>" , lowerCamelCase : Optional[Any]="<s>" , lowerCamelCase : Any="<unk>" , lowerCamelCase : str="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : Optional[int]=False , **lowerCamelCase : str , ):
"""simple docstring"""
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__A : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__A : int = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__A : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
__A : List[str] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle:
__A : Optional[Any] = json.load(lowerCamelCase )
__A : Dict = {v: k for k, v in self.encoder.items()}
__A : List[Any] = errors # how to handle errors in decoding
__A : Optional[int] = bytes_to_unicode()
__A : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="""utf-8""" ) as merges_handle:
__A : str = merges_handle.read().split("""\n""" )[1:-1]
__A : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
__A : List[Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__A : List[str] = {}
__A : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A : Any = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase_( self : Any ):
"""simple docstring"""
return len(self.encoder )
def lowercase_( self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_( self : str , lowerCamelCase : Optional[int] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__A : Optional[Any] = tuple(lowerCamelCase )
__A : Dict = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__A : str = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A : Optional[Any] = bigram
__A : List[Any] = []
__A : Union[str, Any] = 0
while i < len(lowerCamelCase ):
try:
__A : str = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A : Dict = tuple(lowerCamelCase )
__A : List[Any] = new_word
if len(lowerCamelCase ) == 1:
break
else:
__A : Optional[Any] = get_pairs(lowerCamelCase )
__A : Optional[int] = """ """.join(lowerCamelCase )
__A : str = word
return word
def lowercase_( self : str , lowerCamelCase : Optional[int] ):
"""simple docstring"""
__A : List[str] = []
for token in re.findall(self.pat , lowerCamelCase ):
__A : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(""" """ ) )
return bpe_tokens
def lowercase_( self : Dict , lowerCamelCase : Tuple ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_( self : str , lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase )
def lowercase_( self : Tuple , lowerCamelCase : int ):
"""simple docstring"""
__A : Any = """""".join(lowerCamelCase )
__A : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowercase_( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__A : Any = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__A : Optional[Any] = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + """\n""" )
__A : str = 0
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__A : Any = token_index
writer.write(""" """.join(lowerCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowercase_( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
__A : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
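# This yields the BART-style layout LED inherits: `<s> A </s>` for a single
# sequence and `<s> A </s></s> B </s>` for a pair.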
def lowercase_( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase_( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
__A : int = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_( self : Dict , lowerCamelCase : int , lowerCamelCase : Dict=False , **lowerCamelCase : List[Any] ):
"""simple docstring"""
__A : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__A : Dict = """ """ + text
return (text, kwargs)
def lowercase_( self : Optional[int] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
"""simple docstring"""
__A : str = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__A : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
__A : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase )
if needs_to_be_padded:
__A : str = len(lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__A : List[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__A : Any = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 499 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ):
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Linear(3 , 4 )
UpperCamelCase = nn.BatchNorm2d(4 )
UpperCamelCase = nn.Linear(4 , 5 )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
return output + 1
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE__ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '_old_forward' ) )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , append=SCREAMING_SNAKE_CASE__ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '_old_forward' ) )
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = test_model(x + 1 )
UpperCamelCase = test_model(x + 2 )
UpperCamelCase = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCamelCase = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCamelCase = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCamelCase = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
UpperCamelCase = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , output + 2 , atol=1e-5 )
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
UpperCamelCase = True
UpperCamelCase = test_model(SCREAMING_SNAKE_CASE__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(SCREAMING_SNAKE_CASE__ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase = torch.randn(2 , 3 ).to(0 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(0 ) )
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCamelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
UpperCamelCase = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCamelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , offload_buffers=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
UpperCamelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
UpperCamelCase = torch.randn(2 , 3 )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 282 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase ) -> str:
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
UpperCamelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
UpperCamelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
UpperCamelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(_lowercase )-1}' )
if "norm" in key:
UpperCamelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
UpperCamelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(_lowercase )-1}' )
if "layer_norm1" in key:
UpperCamelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
UpperCamelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase = key[key.find('block' ) + len('block' )]
UpperCamelCase = key.replace(F'block{idx}' , F'block.{int(_lowercase )-1}' )
if "attn.q" in key:
UpperCamelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
UpperCamelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
UpperCamelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
UpperCamelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
UpperCamelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
UpperCamelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
UpperCamelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
UpperCamelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase = key[key.find('linear_c' ) + len('linear_c' )]
UpperCamelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(_lowercase )-1}' )
if "bot_conv" in key:
UpperCamelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
UpperCamelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
UpperCamelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
UpperCamelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
UpperCamelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
UpperCamelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
UpperCamelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
UpperCamelCase = key.replace('module.last_layer_depth' , 'head.head' )
UpperCamelCase = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
UpperCamelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase = kv_bias[config.hidden_sizes[i] :]
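# The original checkpoint stores keys and values stacked in one `kv`
# projection; slicing at hidden_sizes[i] along dim 0 splits it into the
# separate key and value weights the HuggingFace attention module expects.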
def __lowerCamelCase ( ) -> Any:
UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return image
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False , _lowercase=None ) -> Tuple:
UpperCamelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCamelCase = GLPNImageProcessor()
# prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_lowercase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
UpperCamelCase = torch.load(_lowercase , map_location=torch.device('cpu' ) )
# rename keys
UpperCamelCase = rename_keys(_lowercase )
# key and value matrices need special treatment
read_in_k_v(_lowercase , _lowercase )
# create HuggingFace model and load state dict
UpperCamelCase = GLPNForDepthEstimation(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# forward pass
UpperCamelCase = model(_lowercase )
UpperCamelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCamelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
UpperCamelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _lowercase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_lowercase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_lowercase , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
_snake_case = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
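# Example invocation (hedged: the script filename and checkpoint path below are
# hypothetical placeholders, not taken from the original repository):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti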
| 282 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
@staticmethod
@abstractmethod
def _a ( a_ ):
raise NotImplementedError()
@abstractmethod
def _a ( self ):
raise NotImplementedError()
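# Hedged sketch of a concrete command built on the abstract base above (the
# class and method names below are hypothetical, not from the original file):
#
#   class DownloadCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           parser.add_parser("download")
#       def run(self):
#           print("downloading...")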
| 711 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A_ ( __a : List[Any] , __a : List[str] , __a : Union[str, Any] ):
"""simple docstring"""
a__ = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
a__ = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
a__ = F'''{src_lang}-{tgt_lang}'''
a__ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(__a , exist_ok=__a )
a__ = os.path.join(__a , """README.md""" )
print(F'''Generating {path}''' )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(__a )
# make sure we are under the root of the project
UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
UpperCAmelCase = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = model_name.split("""-""")
UpperCAmelCase = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
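# Single-card usage sketch (hedged; the output directory is hypothetical):
#
#   write_model_card(Path("model_cards/facebook/wmt19-en-de"), src_lang="en", tgt_lang="de")
#
# which renders the templated card to model_cards/facebook/wmt19-en-de/README.md.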
| 351 | 0 |
'''simple docstring'''
import argparse
_UpperCamelCase : Optional[int] ="""docs/source/_static/js/custom.js"""
def lowerCamelCase_ ( A_ ):
with open(A_ , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowerCamelCase = f.readlines()
__lowerCamelCase = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
__lowerCamelCase = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(A_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A_ )
if __name__ == "__main__":
_UpperCamelCase : Dict =argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_UpperCamelCase : str =parser.parse_args()
update_custom_js(args.version)
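# For reference, the updater above assumes custom.js contains a block shaped
# roughly like this (hedged; the versions shown are hypothetical):
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "v4.30.0 (stable)",
#       "v4.29.0": "v4.29.0",
#   }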
| 316 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ : Any = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ : Optional[int] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ : str = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ : int = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ : Union[str, Any] = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ : int = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ : List[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ : Optional[Any] = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ : str = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ : Union[str, Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ : str = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ : List[Any] = re.compile(r"""^\s*else:""")
def __A ( UpperCAmelCase ) -> Dict:
'''simple docstring'''
if _re_test_backend.search(UpperCAmelCase ) is None:
return None
_UpperCamelCase : Optional[int] = [b[0] for b in _re_backend.findall(UpperCAmelCase )]
backends.sort()
return "_and_".join(UpperCAmelCase )
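# Conceptually, the backend finder above maps a dependency guard line to a
# backend string (illustrative):
#   "    if not is_torch_available():"  ->  "torch"
#   "_import_structure = {"             ->  None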
def __A ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
with open(UpperCAmelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Optional[int] = f.readlines()
_UpperCamelCase : str = 0
while line_index < len(UpperCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCamelCase : Tuple = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_UpperCamelCase : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCAmelCase ):
_UpperCamelCase : List[Any] = _re_one_line_import_struct.search(UpperCAmelCase ).groups()[0]
_UpperCamelCase : Tuple = re.findall(R"\[([^\]]+)\]" ,UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_UpperCamelCase : Optional[int] = _re_import_struct_key_value.search(UpperCAmelCase )
if single_line_import_search is not None:
_UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_UpperCamelCase : int = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCamelCase : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCamelCase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCamelCase : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_UpperCamelCase : Tuple = lines[line_index]
if _re_import_struct_add_one.search(UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCAmelCase ) is not None:
_UpperCamelCase : Optional[Any] = _re_import_struct_add_many.search(UpperCAmelCase ).groups()[0].split(", " )
_UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_between_brackets.search(UpperCAmelCase ) is not None:
_UpperCamelCase : Dict = _re_between_brackets.search(UpperCAmelCase ).groups()[0].split(", " )
_UpperCamelCase : Union[str, Any] = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_quote_object.search(UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(UpperCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
_UpperCamelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCamelCase : Any = []
while (
line_index < len(UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_UpperCamelCase : Optional[Any] = lines[line_index]
_UpperCamelCase : Optional[int] = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCamelCase : Any = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCamelCase : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCamelCase : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_UpperCamelCase : Any = lines[line_index]
_UpperCamelCase : Union[str, Any] = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
_UpperCamelCase : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
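# Hedged, schematic example of a minimal __init__.py that the parser above
# understands (not an exact transformers init):
#
#   _import_structure = {"configuration_foo": ["FooConfig"]}
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass
#   else:
#       _import_structure["modeling_foo"] = ["FooModel"]
#   if TYPE_CHECKING:
#       from .configuration_foo import FooConfig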
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> Dict:
'''simple docstring'''
def find_duplicates(UpperCAmelCase ):
return [k for k, v in collections.Counter(UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCamelCase : Optional[int] = []
for key in import_dict_objects.keys():
_UpperCamelCase : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCamelCase : List[str] = "base imports" if key == "none" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
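# Standalone sketch of the duplicate check used above (hedged; this mirrors
# the nested find_duplicates helper rather than replacing it).
def _find_duplicates_sketch(items):
    import collections
    return [k for k, v in collections.Counter(items).items() if v > 1]
# e.g. _find_duplicates_sketch(["BertModel", "BertModel", "GPT2Model"]) == ["BertModel"]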
def __A ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : int = []
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
_UpperCamelCase : Dict = os.path.join(UpperCAmelCase ,"__init__.py" )
_UpperCamelCase : Any = parse_init(UpperCAmelCase )
if objects is not None:
_UpperCamelCase : Any = analyze_results(*UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
_UpperCamelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(UpperCAmelCase ) )
if len(UpperCAmelCase ) > 0:
raise ValueError("\n\n".join(UpperCAmelCase ) )
def __A ( ) -> str:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = []
for path, directories, files in os.walk(UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
_UpperCamelCase : Optional[int] = str((Path(UpperCAmelCase ) / folder).relative_to(UpperCAmelCase ) )
_UpperCamelCase : List[Any] = short_path.replace(os.path.sep ,"." )
submodules.append(UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
_UpperCamelCase : Optional[Any] = str((Path(UpperCAmelCase ) / fname).relative_to(UpperCAmelCase ) )
_UpperCamelCase : int = short_path.replace(".py" ,"" ).replace(os.path.sep ,"." )
if len(submodule.split("." ) ) == 1:
submodules.append(UpperCAmelCase )
return submodules
lowerCAmelCase_ : Dict = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def __A ( ) -> Optional[int]:
'''simple docstring'''
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_UpperCamelCase : List[str] = direct_transformers_import(UpperCAmelCase )
_UpperCamelCase : Any = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(UpperCAmelCase ,"__init__.py" ) ,"r" ) as f:
_UpperCamelCase : List[Any] = f.read()
import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" ,UpperCAmelCase ) ) )
_UpperCamelCase : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCAmelCase ) > 0:
_UpperCamelCase : Any = "\n".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
f'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 435 | 0 |
from __future__ import annotations
def snake_case( __magic_name__ ) -> list[int]:
'''simple docstring'''
lowercase : int = [True] * limit
lowercase : Optional[int] = False
lowercase : Any = False
lowercase : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowercase : Tuple = i * 2
while index < limit:
lowercase : Optional[Any] = False
lowercase : List[Any] = index + i
lowercase : List[Any] = [2]
for i in range(3 , __magic_name__ , 2 ):
if is_prime[i]:
primes.append(__magic_name__ )
return primes
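# Illustrative: with a limit of 30 the sieve above yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].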
def snake_case( __magic_name__ = 1_00_00_00 ) -> int:
'''simple docstring'''
lowercase : List[Any] = prime_sieve(__magic_name__ )
lowercase : Any = 0
lowercase : Tuple = 0
for i in range(len(__magic_name__ ) ):
for j in range(i + length , len(__magic_name__ ) ):
lowercase : Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowercase : Union[str, Any] = j - i
lowercase : str = sol
return largest
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 596 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
lowerCAmelCase_ = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : List[int] = []
def __init__( self : Tuple , _A : Any , _A : Tuple="<unk>" , _A : int="<s>" , _A : int="</s>" , _A : Optional[int]="<pad>" , _A : Optional[int]="[SEP]" , _A : Optional[Any]="[MASK]" , _A : Union[str, Any]="[CLS]" , _A : Optional[Dict[str, Any]] = None , **_A : Dict , ) -> None:
"""simple docstring"""
lowercase : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
lowercase : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
lowercase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
lowercase : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
lowercase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
lowercase : Union[str, Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowercase : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sep_token=_A , mask_token=_A , cls_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowercase : str = vocab_file
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase : Dict = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase : int = self.__dict__.copy()
lowercase : int = None
return state
def __setstate__( self : List[str] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : int = {}
lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : List[str] , _A : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : int , _A : Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(_A )
def __a ( self : List[Any] , _A : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = self.sp_model.IdToPiece(_A )
return token
def __a ( self : int , _A : int ) -> str:
"""simple docstring"""
lowercase : List[str] = []
lowercase : Optional[int] = ''''''
lowercase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
lowercase : str = True
lowercase : List[Any] = []
else:
current_sub_tokens.append(_A )
lowercase : str = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __a ( self : str , _A : List[int] , _A : bool = False , _A : bool = None , _A : bool = True , **_A : Optional[int] , ) -> str:
"""simple docstring"""
lowercase : Dict = kwargs.pop('''use_source_tokenizer''' , _A )
lowercase : str = self.convert_ids_to_tokens(_A , skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowercase : List[Any] = []
lowercase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
lowercase : str = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowercase : Dict = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(_A ) )
else:
lowercase : Optional[Any] = ''''''.join(_A )
lowercase : Union[str, Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowercase : Union[str, Any] = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __a ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Optional[int] = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __a ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
lowercase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
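    # Illustrative layouts (hedged): a single sequence becomes [CLS] A [SEP],
    # while a pair becomes [CLS] A [SEP] B [SEP], mirroring BERT-style inputs.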
def __a ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def __a ( self : str , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : str = [self.sep_token_id]
lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 596 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
UpperCAmelCase_ : int = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
lowerCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowerCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class UpperCamelCase :
lowerCAmelCase : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """The input training data file (a text file)."""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __A ( self ):
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase :
lowerCAmelCase : PreTrainedTokenizerBase
lowerCAmelCase : Union[bool, str, PaddingStrategy] = True
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[int] = None
def __call__( self , UpperCAmelCase__ ):
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(UpperCAmelCase__ ) for feature in features]
A__ = len(UpperCAmelCase__ )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
]
A__ = list(chain(*UpperCAmelCase__ ) )
A__ = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
        A__ = torch.tensor(UpperCAmelCase__ , dtype=torch.int64 )
return batch
def UpperCamelCase ( )-> Tuple:
"""simple docstring"""
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , _A , _A )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(_A )
datasets.utils.logging.set_verbosity(_A )
transformers.utils.logging.set_verbosity(_A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
_A , data_files=_A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [f"""ending{i}""" for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length` value"
                " of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length` you can"
                " override this default with `--max_seq_length xxx`." )
A__ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_A : Any ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_A )
]
# Flatten out
A__ = list(chain(*_A ) )
A__ = list(chain(*_A ) )
# Tokenize
A__ = tokenizer(
_A , _A , truncation=_A , max_length=_A , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_A ) , 4 )] for k, v in tokenized_examples.items()}
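    # Shape walk-through for the flatten/un-flatten above (illustrative): with 4
    # endings per question, N examples expand to 4 * N (context, ending) strings
    # for the tokenizer, and each tokenized key is then regrouped into N lists
    # of 4 so every example keeps its full candidate set.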
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(_A ) , data_args.max_train_samples )
A__ = train_dataset.select(range(_A ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
_A , batched=_A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(_A ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(_A ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
_A , batched=_A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=_A , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(_A : Union[str, Any] ):
A__ , A__ = eval_predictions
A__ = np.argmax(_A , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=_A , args=_A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_A , data_collator=_A , compute_metrics=_A , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=_A )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_A )
)
A__ = min(_A , len(_A ) )
trainer.log_metrics("train" , _A )
trainer.save_metrics("train" , _A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_A )
A__ = min(_A , len(_A ) )
trainer.log_metrics("eval" , _A )
trainer.save_metrics("eval" , _A )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_A )
else:
trainer.create_model_card(**_A )
def UpperCamelCase ( _A : Tuple )-> Any:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 491 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
A__ = 10
def __A ( self ):
A__ = [1, 2, 3, 4]
A__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __A ( self ):
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __A ( self ):
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __A ( self ):
A__ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
A__ , A__ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __A ( self ):
A__ = ""
A__ , A__ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __A ( self ):
A__ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
A__ , A__ = process_story(UpperCAmelCase__ )
A__ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = ["It was the best of times."]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( self ):
A__ = torch.tensor([1, 2, 3, 4] )
A__ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __A ( self ):
A__ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __A ( self ):
A__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __A ( self ):
A__ = 101
A__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A__ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
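        # Illustrative reading of the expected tensor: segment ids start at 1 and
        # toggle at every separator (here 101), with each separator itself taking
        # the new segment's id.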
| 491 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Union[str, Any] = ['''image_processor''', '''tokenizer''']
__lowerCAmelCase : List[Any] = '''ViTImageProcessor'''
__lowerCAmelCase : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Dict , a : Optional[int]=None , a : str=None , **a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
lowercase = kwargs.pop('''feature_extractor''' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Optional[int] , a : str=None , a : List[Any]=None , a : List[Any]=None , a : Optional[Any]=None , **a : Dict ) -> Optional[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
lowercase = self.tokenizer(a , return_tensors=a , **a )
if visual_prompt is not None:
lowercase = self.image_processor(a , return_tensors=a , **a )
if images is not None:
lowercase = self.image_processor(a , return_tensors=a , **a )
if visual_prompt is not None and images is not None:
lowercase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
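    # Illustrative call patterns (hedged): with `text` and `images` the text
    # encoding is returned carrying `pixel_values`; with `visual_prompt` and
    # `images` a dict with both `pixel_values` and `conditional_pixel_values`
    # is returned.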
def _lowerCAmelCase ( self : Any , *a : str , **a : Tuple ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def _lowerCAmelCase ( self : Optional[Any] , *a : Tuple , **a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
        return self.image_processor
| 396 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : float , __snake_case : float , __snake_case : int ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(__snake_case , __snake_case ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
lowercase_ : Tuple = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase_ : Any = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
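# Worked example (illustrative): principal = 25_000, rate_per_annum = 0.12 and
# years_to_repay = 3 give a monthly rate of 0.01 over 36 payments, i.e.
# EMI = 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36 per month.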
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowercase ( __snake_case : int ):
lowercase_ : Optional[int] = SwinConfig()
lowercase_ : str = swin_name.split('''_''' )
lowercase_ : int = name_split[1]
lowercase_ : Union[str, Any] = int(name_split[4] )
lowercase_ : List[Any] = int(name_split[3][-1] )
if model_size == "tiny":
lowercase_ : Union[str, Any] = 9_6
lowercase_ : int = (2, 2, 6, 2)
lowercase_ : List[str] = (3, 6, 1_2, 2_4)
elif model_size == "small":
lowercase_ : Optional[int] = 9_6
lowercase_ : Optional[Any] = (2, 2, 1_8, 2)
lowercase_ : Dict = (3, 6, 1_2, 2_4)
elif model_size == "base":
lowercase_ : Optional[Any] = 1_2_8
lowercase_ : int = (2, 2, 1_8, 2)
lowercase_ : List[str] = (4, 8, 1_6, 3_2)
else:
lowercase_ : List[Any] = 1_9_2
lowercase_ : int = (2, 2, 1_8, 2)
lowercase_ : str = (6, 1_2, 2_4, 4_8)
if "in22k" in swin_name:
lowercase_ : Dict = 2_1_8_4_1
else:
lowercase_ : Any = 1_0_0_0
lowercase_ : List[Any] = '''huggingface/label-files'''
lowercase_ : int = '''imagenet-1k-id2label.json'''
lowercase_ : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowercase_ : Tuple = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : List[str] = {v: k for k, v in idalabel.items()}
lowercase_ : Optional[Any] = img_size
lowercase_ : Tuple = num_classes
lowercase_ : Optional[Any] = embed_dim
lowercase_ : Union[str, Any] = depths
lowercase_ : Dict = num_heads
lowercase_ : int = window_size
return config
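# Illustrative parse (hedged): "swin_tiny_patch4_window7_224" splits into
# model_size="tiny", window_size=7 and img_size=224, which selects
# embed_dim=96, depths=(2, 2, 6, 2) and num_heads=(3, 6, 12, 24) above.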
def lowercase ( __snake_case : str ):
if "patch_embed.proj" in name:
lowercase_ : Dict = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase_ : str = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase_ : int = '''encoder.''' + name
if "attn.proj" in name:
lowercase_ : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase_ : str = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase_ : Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase_ : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase_ : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase_ : Any = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
lowercase_ : int = '''layernorm.weight'''
if name == "norm.bias":
lowercase_ : Optional[Any] = '''layernorm.bias'''
if "head" in name:
lowercase_ : Any = name.replace('''head''' , '''classifier''' )
else:
lowercase_ : Tuple = '''swin.''' + name
return name
def lowercase ( __snake_case : List[str] , __snake_case : List[Any] ):
for key in orig_state_dict.copy().keys():
lowercase_ : List[str] = orig_state_dict.pop(__snake_case )
if "mask" in key:
continue
elif "qkv" in key:
lowercase_ : int = key.split('''.''' )
lowercase_ : str = int(key_split[1] )
lowercase_ : Optional[int] = int(key_split[3] )
lowercase_ : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase_ : List[Any] = val[:dim, :]
lowercase_ : List[Any] = val[
dim : dim * 2, :
]
lowercase_ : int = val[-dim:, :]
else:
lowercase_ : Optional[Any] = val[
:dim
]
lowercase_ : Optional[Any] = val[
dim : dim * 2
]
lowercase_ : Union[str, Any] = val[
-dim:
]
else:
lowercase_ : Tuple = val
return orig_state_dict
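# Minimal sketch of the qkv split above (hedged; not part of the original
# script). timm stores the query, key and value projections stacked along
# dim 0 of a single "qkv" weight, so three equal slices recover them.
def _qkv_split_sketch():
    import torch
    dim = 4  # toy head size, an assumption for illustration
    qkv_weight = torch.randn(3 * dim, dim)
    q_weight = qkv_weight[:dim, :]
    k_weight = qkv_weight[dim : dim * 2, :]
    v_weight = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([q_weight, k_weight, v_weight]), qkv_weight)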
def lowercase ( __snake_case : int , __snake_case : Optional[int] ):
lowercase_ : Any = timm.create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
lowercase_ : List[Any] = get_swin_config(__snake_case )
lowercase_ : Optional[Any] = SwinForImageClassification(__snake_case )
model.eval()
lowercase_ : Dict = convert_state_dict(timm_model.state_dict() , __snake_case )
model.load_state_dict(__snake_case )
lowercase_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
lowercase_ : int = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
lowercase_ : Tuple = image_processor(images=__snake_case , return_tensors='''pt''' )
lowercase_ : List[str] = timm_model(inputs['''pixel_values'''] )
lowercase_ : List[Any] = model(**__snake_case ).logits
assert torch.allclose(__snake_case , __snake_case , atol=1e-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 231 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , __snake_case , )
class UpperCAmelCase ( __snake_case ):
lowercase = RobertaConfig
lowercase = """roberta"""
def __init__( self : Dict , __magic_name__ : List[str] ):
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCamelCase = RobertaEmbeddings(__magic_name__ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , __snake_case , )
class UpperCAmelCase ( __snake_case ):
lowercase = RobertaConfig
lowercase = """roberta"""
def __init__( self : Optional[Any] , __magic_name__ : Dict ):
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCamelCase = config.num_labels
UpperCamelCase = config.num_hidden_layers
UpperCamelCase = DeeRobertaModel(__magic_name__ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase_ ( self : Any , __magic_name__ : Tuple=None , __magic_name__ : str=None , __magic_name__ : Any=None , __magic_name__ : str=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None , __magic_name__ : Tuple=-1 , __magic_name__ : Tuple=False , ):
"""simple docstring"""
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.roberta(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(__magic_name__ )
UpperCamelCase = self.classifier(__magic_name__ )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(__magic_name__ )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
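# Each highway exit is an early-exit classifier attached to an intermediate layer; computing a
# loss per exit lets the intermediate layers be trained to classify (the DeeBERT training scheme).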
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__snake_case = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__snake_case = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
__snake_case = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase ( __snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : Optional[int]=None , __magic_name__ : Dict="<s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : Optional[int]="<s>" , __magic_name__ : int="<unk>" , __magic_name__ : str="<pad>" , __magic_name__ : List[str]="<mask>" , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
UpperCamelCase = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(__magic_name__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase = src_lang if src_lang is not None else """en_XX"""
UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self : List[str] , __magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase_ ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : int , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCamelCase = src_lang
UpperCamelCase = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
UpperCamelCase = tgt_lang_id
return inputs
def lowerCamelCase_ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : int , ):
"""simple docstring"""
UpperCamelCase = src_lang
UpperCamelCase = tgt_lang
return super().prepare_seq2seq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self : int , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
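# mBART source format: no prefix tokens; every sequence is suffixed with </s> followed by the language code.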
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self : Tuple , __magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
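# Target side mirrors the source format: [tokens] </s> [tgt_lang_code], with no prefix tokens.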
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self : Any , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__magic_name__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
UpperCamelCase = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
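# Minimal usage sketch (assumes the Hub checkpoint is reachable; shown with the upstream class name,
# and the example texts are purely illustrative):
# tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tokenizer.prepare_seq2seq_batch(["Hello world"], tgt_texts=["Salut lume"], return_tensors="pt")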
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
A__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A__ = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def _a ( self : Optional[int] , **_snake_case : str ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Union[str, Any] , **_snake_case : str ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : int , **_snake_case : List[str] ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Dict ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname )
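# Reloading must round-trip both the tokenizer vocabulary and the image-processor config,
# whether the slow or the fast tokenizer was saved.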
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
A__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = image_processor(_snake_case , return_tensors='np' )
A__ = processor(images=_snake_case , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = processor(text=_snake_case )
A__ = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_snake_case )
A__ = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = XLNetTokenizer
__snake_case = XLNetTokenizerFast
__snake_case = True
__snake_case = True
def UpperCamelCase_ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE : List[Any] = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = "<s>"
_SCREAMING_SNAKE_CASE : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(__lowerCamelCase ) , 1_0_0_6 )
def UpperCamelCase_ ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
_SCREAMING_SNAKE_CASE : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
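# Id 0 is <unk> in this fixture, so converting the ids back maps out-of-vocab pieces ("9", "é") to "<unk>".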
_SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[Any] = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCamelCase_ ( self ) -> int:
# fmt: off
_SCREAMING_SNAKE_CASE : List[Any] = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowercase ( unittest.TestCase ):
def A ( self : Any ) -> Dict:
"""simple docstring"""
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCamelCase )
a = -1
a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
a = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
a = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCamelCase )
a = -1
a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
a = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
a = tokenizer.decode(greedy_ids[0] )
a = TextIteratorStreamer(_lowerCamelCase )
a = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
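# Iterating over the streamer blocks until the generation thread pushes new text,
# and stops once generation signals completion.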
a = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def A ( self : Optional[Any] ) -> str:
"""simple docstring"""
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCamelCase )
a = -1
a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
a = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
a = greedy_ids[:, input_ids.shape[1] :]
a = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def A ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
a = AutoTokenizer.from_pretrained("distilgpt2" )
a = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCamelCase )
a = -1
a = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a = cs.out[:-1] # Remove the final "\n"
a = tokenizer(_lowerCamelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def A ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCamelCase )
a = -1
a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
a = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_0_1 )
a = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
a = ""
for new_text in streamer:
streamer_text += new_text
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (('''num_inference_steps''', 25),)
def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
a = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__lowerCAmelCase )
return config
def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
a = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
a = dummy_past_residuals[: new_scheduler.config.solver_order]
a , a = sample, sample
for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
a = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
a = dummy_past_residuals[: new_scheduler.config.solver_order]
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
if scheduler is None:
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
a = model(__lowerCAmelCase , __lowerCAmelCase )
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A ( self : Any ) -> int:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ):
a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
a = dummy_past_residuals[: scheduler.config.solver_order]
a = scheduler.timesteps[5]
a = scheduler.timesteps[6]
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : List[str] ) -> Dict:
"""simple docstring"""
a = UniPCMultistepScheduler(**self.get_scheduler_config() )
a = self.full_loop(scheduler=__lowerCAmelCase )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
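# Round-trip the config through the other multistep schedulers and back; a UniPC scheduler rebuilt
# from the final config should reproduce the same result.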
a = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a = DEISMultistepScheduler.from_config(scheduler.config )
a = DPMSolverMultistepScheduler.from_config(scheduler.config )
a = UniPCMultistepScheduler.from_config(scheduler.config )
a = self.full_loop(scheduler=__lowerCAmelCase )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def A ( self : List[Any] ) -> Dict:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
a = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def A ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def A ( self : Dict ) -> str:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
def A ( self : Dict ) -> int:
"""simple docstring"""
a = self.full_loop()
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def A ( self : Optional[int] ) -> int:
"""simple docstring"""
a = self.full_loop(prediction_type="v_prediction" )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def A ( self : Union[str, Any] ) -> str:
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
a = scheduler_class(**__lowerCAmelCase )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
a = model(__lowerCAmelCase , __lowerCAmelCase )
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ):
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCamelCase = int((256 / 224) * size['''shortest_edge'''] )
_UpperCamelCase = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
_UpperCamelCase = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Optional[int] , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(_A , _A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(_A , _A , _A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
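# Repeat each input num_choices times so every candidate answer is scored in a single forward pass.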
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
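# The tracing round-trip above follows the standard TorchScript pattern; a
# minimal standalone sketch (the module and tensor shapes are hypothetical,
# not Flaubert-specific):
#
#   import os, tempfile
#   import torch
#
#   traced = torch.jit.trace(torch.nn.Linear(4, 2), (torch.randn(1, 4),))
#   with tempfile.TemporaryDirectory() as tmp:
#       torch.jit.save(traced, os.path.join(tmp, "traced_model.pt"))
#       loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location="cpu")
#       loaded(torch.randn(1, 4))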
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 10 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _UpperCamelCase ( TestCase ):
'''simple docstring'''
def lowercase__ ( self ):
"""simple docstring"""
a__ = SMALL_MODEL_IDENTIFIER
a__ = 'pt'
a__ = 'tf'
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = TFAutoModel.from_pretrained(self.test_model , from_pt=_a )
model_tf.save_pretrained(_a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = 'mock_framework'
# Framework provided - return whatever the user provides
a__ = FeaturesManager.determine_framework(self.test_model , _a )
self.assertEqual(_a , _a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_a )
a__ = FeaturesManager.determine_framework(_a , _a )
self.assertEqual(_a , _a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_a )
a__ = FeaturesManager.determine_framework(_a , _a )
self.assertEqual(_a , _a )
def lowercase__ ( self ):
"""simple docstring"""
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_a )
a__ = FeaturesManager.determine_framework(_a )
self.assertEqual(_a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_a )
a__ = FeaturesManager.determine_framework(_a )
self.assertEqual(_a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_a ):
a__ = FeaturesManager.determine_framework(_a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = MagicMock(return_value=_a )
with patch('transformers.onnx.features.is_tf_available' , _a ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
a__ = MagicMock(return_value=_a )
with patch('transformers.onnx.features.is_torch_available' , _a ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_tf )
# Both in environment -> use PyTorch
a__ = MagicMock(return_value=_a )
a__ = MagicMock(return_value=_a )
with patch('transformers.onnx.features.is_tf_available' , _a ), patch(
'transformers.onnx.features.is_torch_available' , _a ):
a__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_pt )
# Both not in environment -> raise error
a__ = MagicMock(return_value=_a )
a__ = MagicMock(return_value=_a )
with patch('transformers.onnx.features.is_tf_available' , _a ), patch(
'transformers.onnx.features.is_torch_available' , _a ):
with self.assertRaises(_a ):
a__ = FeaturesManager.determine_framework(self.test_model )
| 126 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False  # flag values assumed from the upstream s3prl conversion script
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
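# Example invocation (the model name and all paths below are hypothetical):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model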
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__A : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 126 | 1 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1):
for tile_length in range(2 , 5):
for tile_start in range(row_length - tile_length + 1):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length])
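# Each column of the table counts row tilings that use at least one coloured
# tile of one fixed length (2, 3 or 4 units), and the answer sums the three
# colours; this appears to be Project Euler problem 116. For example, a row of
# length 5 admits exactly 7 arrangements using only length-2 tiles, which is
# what the recurrence above accumulates in column 0.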
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase (SchedulerCommonTest ):
_lowerCamelCase = (DDIMParallelScheduler,)
_lowerCamelCase = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __UpperCamelCase ( self : str , **UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Optional[int] = {
'num_train_timesteps': 1_000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**UpperCAmelCase_)
return config
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : List[Any] = self.scheduler_classes[0]
UpperCamelCase__ : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = scheduler_class(**UpperCAmelCase_)
UpperCamelCase__, UpperCamelCase__ : Dict = 10, 0.0
UpperCamelCase__ : Any = self.dummy_model()
UpperCamelCase__ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_)
for t in scheduler.timesteps:
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Tuple = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_).prev_sample
return sample
def __UpperCamelCase ( self : Any):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def __UpperCamelCase ( self : str):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase_)
UpperCamelCase__ : Any = self.scheduler_classes[0]
UpperCamelCase__ : List[Any] = self.get_scheduler_config(steps_offset=1)
UpperCamelCase__ : Any = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
def __UpperCamelCase ( self : Any):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase_)
def __UpperCamelCase ( self : int):
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def __UpperCamelCase ( self : Tuple):
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase_)
def __UpperCamelCase ( self : int):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
self.check_over_forward(time_step=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=UpperCAmelCase_ , eta=UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Any = self.scheduler_classes[0]
UpperCamelCase__ : Optional[int] = self.get_scheduler_config()
UpperCamelCase__ : Tuple = scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.1_47_71)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.3_24_60)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.0_09_79)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
UpperCamelCase__ : Optional[Any] = self.get_scheduler_config()
UpperCamelCase__ : Optional[Any] = scheduler_class(**UpperCAmelCase_)
UpperCamelCase__, UpperCamelCase__ : str = 10, 0.0
scheduler.set_timesteps(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.dummy_model()
UpperCamelCase__ : Optional[Any] = self.dummy_sample_deter
UpperCamelCase__ : Optional[Any] = self.dummy_sample_deter + 0.1
UpperCamelCase__ : str = self.dummy_sample_deter - 0.1
UpperCamelCase__ : List[Any] = samplea.shape[0]
UpperCamelCase__ : List[str] = torch.stack([samplea, samplea, samplea] , dim=0)
UpperCamelCase__ : Optional[Any] = torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
UpperCamelCase__ : List[str] = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
UpperCamelCase__ : List[str] = scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.sum(torch.abs(UpperCAmelCase_))
UpperCamelCase__ : Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 11_47.79_04) < 1e-2
assert abs(result_mean.item() - 0.49_82) < 1e-3
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = self.full_loop()
UpperCamelCase__ : Any = torch.sum(torch.abs(UpperCAmelCase_))
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1_72.00_67) < 1e-2
assert abs(result_mean.item() - 0.22_39_67) < 1e-3
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = self.full_loop(prediction_type='v_prediction')
UpperCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase_))
UpperCamelCase__ : Optional[int] = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 52.53_02) < 1e-2
assert abs(result_mean.item() - 0.06_84) < 1e-3
def __UpperCamelCase ( self : Tuple):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ : int = self.full_loop(set_alpha_to_one=UpperCAmelCase_ , beta_start=0.01)
UpperCamelCase__ : Optional[int] = torch.sum(torch.abs(UpperCAmelCase_))
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1_49.82_95) < 1e-2
assert abs(result_mean.item() - 0.19_51) < 1e-3
def __UpperCamelCase ( self : str):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ : Dict = self.full_loop(set_alpha_to_one=UpperCAmelCase_ , beta_start=0.01)
UpperCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase_))
UpperCamelCase__ : str = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1_49.07_84) < 1e-2
assert abs(result_mean.item() - 0.19_41) < 1e-3
| 596 | 0 |
'''simple docstring'''
def perfect(number: int) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
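# A perfect number equals the sum of its proper divisors, e.g. perfect(6) is
# True because 1 + 2 + 3 == 6, while perfect(27) is False.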
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 711 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
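# Note: the CSS class above is a hard-coded Yahoo Finance selector; it is
# brittle and will break whenever the page markup changes.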
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 145 | 0 |
def euclidean_gcd(a, b):
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a, b):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
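# Both variants rely on Euclid's identity gcd(a, b) == gcd(b, a % b), e.g.
# euclidean_gcd(252, 105) -> gcd(105, 42) -> gcd(42, 21) -> gcd(21, 0) == 21.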
def main():
"""simple docstring"""
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}''' )
if __name__ == "__main__":
main()
| 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class a__ :
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[Any]=True , a : Dict=False , a : Optional[Any]=False , a : List[Any]=False , ):
"""simple docstring"""
__lowerCamelCase = 4
__lowerCamelCase = 32
__lowerCamelCase = (32, 32)
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = torch.device(a )
__lowerCamelCase = (batch_size, num_channels) + sizes
__lowerCamelCase = randn_tensor(a , generator=a , device=a )
__lowerCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
__lowerCamelCase = 1_28
__lowerCamelCase = randn_tensor((batch_size, temb_channels) , generator=a , device=a )
if include_res_hidden_states_tuple:
__lowerCamelCase = torch.manual_seed(1 )
__lowerCamelCase = (randn_tensor(a , generator=a , device=a ),)
if include_encoder_hidden_states:
__lowerCamelCase = floats_tensor((batch_size, 32, 32) ).to(a )
if include_skip_sample:
__lowerCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=a , device=a )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 1_28,
}
if self.block_type == "up":
__lowerCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**a )
unet_block.to(a )
unet_block.eval()
with torch.no_grad():
__lowerCamelCase = unet_block(**a )
if isinstance(a , a ):
__lowerCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
__lowerCamelCase = output[0, -1, -3:, -3:]
__lowerCamelCase = torch.tensor(a ).to(a )
assert torch_all_close(output_slice.flatten() , a , atol=5e-3 )
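    # Concrete block test classes are expected to pass in the 3x3 slice of the
    # last feature map that they consider correct for their block type.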
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**a )
model.to(a )
model.train()
__lowerCamelCase = model(**a )
if isinstance(a , a ):
__lowerCamelCase = output[0]
__lowerCamelCase = torch.device(a )
__lowerCamelCase = randn_tensor(output.shape , device=a )
__lowerCamelCase = torch.nn.functional.mse_loss(a , a )
loss.backward()
| 546 | 0 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """simple docstring"""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
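# The expected input is a nested mapping benchmark -> metric -> values; a
# hypothetical example:
#   {"benchmarks/text_generation": {"latency_ms": {"new": 12.3, "old": 14.1, "diff": -1.8}}}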
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 146 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( SchedulerCommonTest ):
SCREAMING_SNAKE_CASE__ : List[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ : Any = 10
def snake_case_ ( self , **_lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCAmelCase )
return config
def snake_case_ ( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Optional[int] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE: Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE: Any = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = output.prev_sample
__SCREAMING_SNAKE_CASE: Tuple = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: List[Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
__SCREAMING_SNAKE_CASE: str = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE: str = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[int] = self.dummy_model()
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE: Tuple = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE: List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Tuple = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE: int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = output.prev_sample
__SCREAMING_SNAKE_CASE: List[str] = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: str = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler_class(**_lowerCAmelCase , use_karras_sigmas=_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Any = self.dummy_model()
__SCREAMING_SNAKE_CASE: str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = output.prev_sample
__SCREAMING_SNAKE_CASE: int = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 146 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,  # verbose flag assumed; the original value was obfuscated away
    )
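# Of the supported monitors only "loss" is minimised; rouge2, bleu and em are
# maximised, which is why the mode flips on the substring check above.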
class _lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = {f'lr_group_{i}': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowercase )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> None:
'''simple docstring'''
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
snake_case_ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
snake_case_ : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
snake_case_ : Dict = od / """test_results.txt"""
snake_case_ : Dict = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
snake_case_ : str = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
snake_case_ : Tuple = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_lowercase )
generations_file.parent.mkdir(exist_ok=_lowercase )
with open(_lowercase , """a+""" ) as writer:
for key in sorted(_lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
snake_case_ : int = metrics[key]
if isinstance(_lowercase , torch.Tensor ):
snake_case_ : Optional[Any] = val.item()
snake_case_ : int = f'{key}: {val:.6f}\n'
writer.write(_lowercase )
if not save_generations:
return
if "preds" in metrics:
snake_case_ : Any = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_lowercase )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
try:
snake_case_ : str = pl_module.model.model.num_parameters()
except AttributeError:
snake_case_ : List[str] = pl_module.model.num_parameters()
snake_case_ : str = count_trainable_parameters(_lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowercase , _lowercase , """test""" )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 58 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class __UpperCAmelCase( PreTrainedTokenizer ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = MAX_MODEL_INPUT_SIZES
__lowerCamelCase = ["input_ids", "attention_mask"]
__lowerCamelCase = []
def __init__( self , snake_case__ , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="<unk>" , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , do_upper_case=snake_case__ , do_lower_case=snake_case__ , tgt_lang=snake_case__ , lang_codes=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowercase__ : Optional[int]= do_upper_case
lowercase__ : Optional[Any]= do_lower_case
lowercase__ : str= load_json(snake_case__ )
lowercase__ : str= {v: k for k, v in self.encoder.items()}
lowercase__ : Any= spm_file
lowercase__ : Dict= load_spm(snake_case__ , self.sp_model_kwargs )
if lang_codes is not None:
lowercase__ : Tuple= lang_codes
lowercase__ : List[str]= LANGUAGES[lang_codes]
lowercase__ : Optional[int]= [F'''<lang:{lang}>''' for lang in self.langs]
lowercase__ : str= {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
lowercase__ : Optional[int]= self.lang_tokens
lowercase__ : List[str]= tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowercase__ : Any= {}
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return len(self.encoder )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : str= new_tgt_lang
self.set_tgt_lang_special_tokens(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : int= self.lang_code_to_id[tgt_lang]
lowercase__ : Any= [lang_code_id]
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder[self.unk_token] )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= []
lowercase__ : Any= ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowercase__ : int= self.sp_model.decode(snake_case__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowercase__ : List[Any]= []
else:
current_sub_tokens.append(snake_case__ )
lowercase__ : Tuple= self.sp_model.decode(snake_case__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
lowercase__ : str= [1] * len(self.prefix_tokens )
lowercase__ : Optional[Any]= [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowercase__ : Dict= self.__dict__.copy()
lowercase__ : Optional[Any]= None
return state
def __setstate__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ : Optional[Any]= {}
lowercase__ : Optional[int]= load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : str= Path(snake_case__ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
lowercase__ : Optional[Any]= save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowercase__ : Dict= save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , snake_case__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case__ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case__ , "wb" ) as fi:
lowercase__ : Any= self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (str(snake_case__ ), str(snake_case__ ))
def lowercase__(A , A ) ->sentencepiece.SentencePieceProcessor:
"""simple docstring"""
lowercase__ : List[Any]= sentencepiece.SentencePieceProcessor(**A )
spm.Load(str(A ) )
return spm
def lowercase__(A ) ->Union[Dict, List]:
"""simple docstring"""
with open(A , "r" ) as f:
return json.load(A )
def lowercase__(A , A ) ->None:
"""simple docstring"""
with open(A , "w" ) as f:
json.dump(A , A , indent=2 )
| 218 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = pipeline("""text-classification""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
__a : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Tuple = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a : Union[str, Any] = """HuggingFace is in"""
__a : List[str] = text_classifier(_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
__a : Dict = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a : Dict = text_classifier(_lowercase , top_k=_lowercase )
__a : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
__a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a : Any = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(_lowercase ):
text_classifier(_lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 702 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    # the function name is a descriptive guess; the original name was obfuscated
    # and nothing else in the snippet references it
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
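# Example: for [1, 2, 3, 4] the mean is 2.5, the absolute deviations are
# 1.5, 0.5, 0.5 and 1.5, so the mean absolute deviation is 4.0 / 4 == 1.0.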
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class __magic_name__ ( PreTrainedTokenizer ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_ = None , **snake_case_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowercase =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
lowercase =vocab_file
lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
lowercase ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase =len(self.sp_model ) - 1
lowercase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _A( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase =[self.cls_token_id]
lowercase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
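    # Pairs therefore follow the <s> A </s></s> B </s> layout, the same
    # RoBERTa-style format used by CamemBERT, from which BARThez derives.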
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =[self.sep_token_id]
lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _A( self ):
return len(self.sp_model )
def _A( self ):
lowercase ={self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def _A( self , snake_case_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase =self.sp_model.PieceToId(snake_case_ )
return spm_id if spm_id else self.unk_token_id
def _A( self , snake_case_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(snake_case_ )
def _A( self , snake_case_ ):
lowercase =[]
lowercase =''''''
lowercase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
lowercase =True
lowercase =[]
else:
current_sub_tokens.append(snake_case_ )
lowercase =False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __getstate__( self ):
lowercase =self.__dict__.copy()
lowercase =None
return state
def __setstate__( self , snake_case_ ):
lowercase =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase ={}
lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase =os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , '''wb''' ) as fi:
lowercase =self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 72 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = '''lower newer'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMTokenizer(self.vocab_file , self.merges_file )
__lowerCamelCase = '''lower'''
__lowerCamelCase = ['''low''', '''er</w>''']
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = tokens + ['''<unk>''']
__lowerCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
__lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 175 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCAmelCase__ :
'''simple docstring'''
    pass
| 705 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
@slow
@require_torch
def _snake_case ( self : int ) -> str:
_lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
_lowerCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCamelCase = bertabert.config.encoder.vocab_size
_lowerCamelCase = tokenizer.sep_token_id
_lowerCamelCase = tokenizer.cls_token_id
_lowerCamelCase = 1_2_8
_lowerCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
_lowerCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
_lowerCamelCase = train_dataset.select(range(3_2 ) )
_lowerCamelCase = val_dataset.select(range(1_6 ) )
_lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(snake_case__ : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCamelCase = tokenizer(batch['article'] , padding='max_length' , truncation=snake_case__ , max_length=5_1_2 )
_lowerCamelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=snake_case__ , max_length=1_2_8 )
_lowerCamelCase = inputs.input_ids
_lowerCamelCase = inputs.attention_mask
_lowerCamelCase = outputs.input_ids
_lowerCamelCase = outputs.input_ids.copy()
_lowerCamelCase = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
_lowerCamelCase = outputs.attention_mask
assert all(len(snake_case__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(snake_case__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(snake_case__ : Union[str, Any] ):
_lowerCamelCase = pred.label_ids
_lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(snake_case__ ) )] ) / len(snake_case__ )
return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
_lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=snake_case__ , per_device_train_batch_size=snake_case__ , per_device_eval_batch_size=snake_case__ , predict_with_generate=snake_case__ , evaluation_strategy='steps' , do_train=snake_case__ , do_eval=snake_case__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCamelCase = SeqaSeqTrainer(
model=snake_case__ , args=snake_case__ , compute_metrics=_compute_metrics , train_dataset=snake_case__ , eval_dataset=snake_case__ , tokenizer=snake_case__ , )
# start training
trainer.train() | 234 | 0 |
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
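# Migration sketch for downstream code, since the class above only exists to emit
# the deprecation warning (the checkpoint name is illustrative):
#
#     from transformers import MobileViTImageProcessor
#
#     # previously: MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")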
| 220 |
def split(string: str, separator: str = " ") -> list:
    """
    Split the string into all the values separated by the separator
    (defaults to spaces).

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']

    >>> split("Hello there")
    ['Hello', 'there']

    >>> split("11/22/63", separator="/")
    ['11', '22', '63']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
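    # A quick contrast with the builtin str.split: the loop above drops a trailing
    # empty field (the `elif` never fires when the last character is a separator),
    # while the builtin keeps it.
    print(split("a,b,", separator=","))  # ['a', 'b']
    print("a,b,".split(","))  # ['a', 'b', '']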
| 111 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the module tree to the parameter named by `key`
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
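    # Example invocation for reference; paths are illustrative, and the script
    # name assumes the upstream filename convert_encodec_checkpoint_to_pytorch.py.
    # The checkpoint file corresponds to the 24 kHz download link in the header:
    #
    #     python convert_encodec_checkpoint_to_pytorch.py \
    #         --model encodec_24khz \
    #         --checkpoint_path ./encodec_24khz-d7cc33bc.th \
    #         --pytorch_dump_folder_path ./encodec_24khz_converted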
| 580 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """
    Creates a pair of train/validation `DataLoader`s for the GLUE MRPC dataset,
    tokenized with the given model's tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer; fall back to DeepSpeed's dummy optimizer when the
    # DeepSpeed config already specifies one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 580 | 1 |