import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa checkpoint weights into the transformers XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
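
# A hedged usage sketch (the script filename and paths below are illustrative): the
# checkpoint directory is expected to hold the fairseq `model.pt` plus its dictionary
# files, and the output folder receives a transformers-format checkpoint.
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./converted-xlm-roberta-xl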
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right cell of `grid`,
    moving one step at a time in the four cardinal directions, never revisiting
    a cell on the current path, and treating cells equal to 1 as blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
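
# A small usage sketch: on an open 2x2 grid there are exactly two simple paths
# from (0, 0) to (1, 1) (right-then-down and down-then-right).
#
#   grid = [[0, 0], [0, 0]]
#   print(depth_first_search(grid, 0, 0, set()))  # -> 2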
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ["""torch""", """torchsde"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ['torch', 'torchsde'] )
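
# Sketch of the intended behavior (assuming the `torch`/`torchsde` backends are
# missing): the placeholder raises immediately at construction time with a message
# naming the required backends, instead of failing later with an opaque ImportError.
#
#   from diffusers.utils.dummy_torch_and_torchsde_objects import DPMSolverSDEScheduler
#   DPMSolverSDEScheduler()  # -> ImportError listing "torch" and "torchsde"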
import socket


def main():
    """Receive a file from a server over a TCP socket and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
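
# A minimal companion server sketch (assumed, not part of the original file): it
# listens on the same host/port, reads the client's greeting, streams a file back
# until EOF, then closes, which unblocks the client's recv loop above.
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   conn.recv(1024)  # greeting from the client
#   with open("file_to_send", "rb") as f:
#       while chunk := f.read(1024):
#           conn.send(chunk)
#   conn.close()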
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs, partitioning only the list-valued keys."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling, so that entangled lists
    # (e.g. shards and their metadata) stay aligned.
    # First, generate the shuffled indices per list size
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
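
# A quick usage sketch: distributing 10 shards over 3 jobs yields contiguous,
# near-equal ranges, and splitting gen_kwargs partitions only the list-valued keys.
#
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   # -> [range(0, 4), range(4, 7), range(7, 10)]
#
#   _split_gen_kwargs({"files": ["a", "b", "c", "d"], "seed": 42}, max_num_jobs=2)
#   # -> [{"files": ["a", "b"], "seed": 42}, {"files": ["c", "d"], "seed": 42}]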
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
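
# A minimal usage sketch: the defaults reproduce the base configuration, and any
# field can be overridden by keyword (the values below are illustrative, not canonical).
#
#   config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
#   config.num_hidden_layers  # -> 12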
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase):
def A (self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A (self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def A (self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def A (self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
A__ = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def A (self ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A__ = DDPMScheduler()
A__ = AudioDiffusionPipeline(vqvae=lowerCamelCase__ , unet=self.dummy_unet , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ , steps=4 )
A__ = output.audios[0]
A__ = output.images[0]
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ , steps=4 , return_dict=lowerCamelCase__ )
A__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A__ = DDIMScheduler()
A__ = self.dummy_vqvae_and_unet
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
A__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(raw_audio=lowerCamelCase__ , generator=lowerCamelCase__ , start_step=5 , steps=1_0 )
A__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A__ = self.dummy_unet_condition
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase__ , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
A__ = torch.rand((1, 1, 1_0) )
A__ = pipe(generator=lowerCamelCase__ , encoding=lowerCamelCase__ )
A__ = output.images[0]
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase):
def A (self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self ):
"""simple docstring"""
A__ = torch_device
A__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ )
A__ = output.audios[0]
A__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_integration(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
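
# A short usage sketch: "khoor zruog" is "hello world" Caesar-shifted by 3, so the
# chi-squared search should rank shift 3 first (for very short texts the statistic
# can occasionally prefer another shift, so this is the most likely candidate, not
# a guarantee).
#
#   shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
#   # shift -> 3, decoded -> "hello world"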
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed `n`."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
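
# Quick sanity check: the Fibonacci sequence starts 0, 1, 1, 2, 3, 5, 8, ..., so the
# even terms not exceeding 10 are 0, 2 and 8, and the expected sum is 10.
#
#   solution(10)  # -> 10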
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCamelCase = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCamelCase = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCamelCase = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCamelCase = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = FLAX_MODEL_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModel)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : str = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : List[str] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _a ( _BaseAutoModelClass ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
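
# A rough sketch of the idea behind a lazy model mapping: model classes are
# stored as strings and only imported on first lookup, so importing the auto
# module stays cheap. `_SimpleLazyMapping` is a simplified stand-in, not the
# actual `_LazyAutoMapping` implementation.
import importlib

class _SimpleLazyMapping:
    def __init__(self, name_mapping, module_name):
        self._names = name_mapping      # e.g. {"bert": "FlaxBertModel"}
        self._module_name = module_name
        self._cache = {}

    def __getitem__(self, model_type):
        if model_type not in self._cache:
            module = importlib.import_module(self._module_name)
            self._cache[model_type] = getattr(module, self._names[model_type])
        return self._cache[model_type]

# mapping = _SimpleLazyMapping({"bert": "FlaxBertModel"}, "transformers")
# mapping["bert"]  # the import happens only here, on first access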
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=False , ):
__A : Tuple = size if size is not None else {"height": 20, "width": 20}
__A : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
__A : int = parent
__A : List[Any] = batch_size
__A : Tuple = num_channels
__A : Any = image_size
__A : Optional[int] = min_resolution
__A : Any = max_resolution
__A : str = do_resize
__A : Tuple = size
__A : Tuple = do_center_crop
__A : Union[str, Any] = crop_size
__A : Tuple = do_normalize
__A : Union[str, Any] = image_mean
__A : Dict = image_std
__A : Optional[Any] = do_reduce_labels
def __UpperCAmelCase( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    map = Image.open(dataset[1]["file"] )
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image1 = Image.open(ds[0]["file"] )
    map1 = Image.open(ds[1]["file"] )
    image2 = Image.open(ds[2]["file"] )
    map2 = Image.open(ds[3]["file"] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = BeitImageProcessor if is_vision_available() else None
def __UpperCAmelCase( self ):
__A : Tuple = BeitImageProcessingTester(self )
@property
def __UpperCAmelCase( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase( self ):
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) )
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
__A : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__UpperCAmelCase )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
__A : Tuple = []
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A : str = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
__A : Any = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
__A , __A : Optional[Any] = prepare_semantic_single_inputs()
__A : Dict = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
__A , __A : List[Any] = prepare_semantic_batch_inputs()
__A : Tuple = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A : List[Any] = prepare_semantic_single_inputs()
__A : Optional[int] = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
__A : Optional[Any] = True
__A : int = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
lowercase_ = '''sshleifer/student_marian_en_ro_6_1'''
lowercase_ = '''sshleifer/tiny-mbart'''
@require_torch
class __a ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : Optional[int] , snake_case_ : Any=False , snake_case_ : Dict=None , snake_case_ : str=True , snake_case_ : Union[str, Any]=True , snake_case_ : List[Any]=True , snake_case_ : Any=True , )-> List[Any]:
__lowerCAmelCase =self.run_trainer(
eval_steps=1 , max_len=12 , model_name=snake_case_ , num_train_epochs=1 , distributed=snake_case_ , extra_args_str=snake_case_ , predict_with_generate=snake_case_ , do_train=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , )
__lowerCAmelCase =TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""")).log_history
if not do_eval:
return
__lowerCAmelCase =[log for log in logs if """eval_loss""" in log.keys()]
__lowerCAmelCase =eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCAmelCase =eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_)
assert not math.isnan(float(last_step_stats["""eval_loss"""])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self : int)-> Tuple:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self : Tuple)-> Optional[int]:
self.run_seqaseq_quick(distributed=snake_case_)
@require_torch_multi_gpu
def UpperCamelCase ( self : str)-> Union[str, Any]:
self.run_seqaseq_quick(distributed=snake_case_)
@unittest.skip("""Requires an update of the env running those tests""")
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple""")
@unittest.skip("""Requires an update of the env running those tests""")
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : Union[str, Any])-> str:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple --fp16""")
@unittest.skip("""Requires an update of the env running those tests""")
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : Optional[int])-> Any:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=snake_case_)
@unittest.skip("""Requires an update of the env running those tests""")
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self : int)-> Union[str, Any]:
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=snake_case_)
@require_apex
@require_torch_gpu
def UpperCamelCase ( self : Optional[int])-> str:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the
        # same program, and it also breaks other tests that run from the same pytest worker.
        # Therefore, until this is sorted out, it must be run only in an external program, i.e.
        # distributed=True in this test and only under one or more GPUs - if we want a CPU
        # variant we will need a special test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
        # via a 2nd main() call it botches the future eval.
        #
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""")
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""")
@parameterized.expand(["""base""", """low""", """high""", """mixed"""])
@require_torch_multi_gpu
def UpperCamelCase ( self : List[Any] , snake_case_ : Dict)-> Dict:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__lowerCAmelCase ={
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
__lowerCAmelCase =experiments[experiment_id]
__lowerCAmelCase ={"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
__lowerCAmelCase ="""Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**snake_case_ , extra_args_str=data["""extra_args_str"""])
__lowerCAmelCase =len(re.findall(snake_case_ , cl.err))
self.assertEqual(snake_case_ , data["""n_matches"""])
@slow
def UpperCamelCase ( self : Union[str, Any])-> Tuple:
__lowerCAmelCase =self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=snake_case_ , learning_rate=3e-4 , num_train_epochs=10 , distributed=snake_case_ , )
# Check metrics
__lowerCAmelCase =TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""")).log_history
__lowerCAmelCase =[log for log in logs if """eval_loss""" in log.keys()]
__lowerCAmelCase =eval_metrics[0]
__lowerCAmelCase =eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_)
# test if do_predict saves generations and metrics
__lowerCAmelCase =os.listdir(snake_case_)
__lowerCAmelCase ={os.path.basename(snake_case_) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self : List[Any])-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(snake_case_ : str) -> Tuple[int, float]:
__lowerCAmelCase ="""--skip_memory_metrics 0"""
__lowerCAmelCase =self.run_trainer(
max_len=1_28 , model_name=snake_case_ , learning_rate=3e-4 , num_train_epochs=1 , optim=snake_case_ , distributed=snake_case_ , extra_args_str=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , n_gpus_to_use=1 , )
# Check metrics
__lowerCAmelCase =TrainerState.load_from_json(Path(snake_case_ , """trainer_state.json""")).log_history
__lowerCAmelCase =int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20)
__lowerCAmelCase =int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20)
__lowerCAmelCase =logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
__lowerCAmelCase =gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCAmelCase =gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCAmelCase =gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCAmelCase =gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB in savings
__lowerCAmelCase =1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
snake_case_ , snake_case_ , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""")
def UpperCamelCase ( self : List[str] , snake_case_ : int , snake_case_ : str , snake_case_ : int , snake_case_ : float = 3e-3 , snake_case_ : str = "adafactor" , snake_case_ : bool = False , snake_case_ : str = None , snake_case_ : int = 0 , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : int = None , )-> Optional[int]:
__lowerCAmelCase =self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
__lowerCAmelCase =self.get_auto_remove_tmp_dir()
__lowerCAmelCase =F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(snake_case_)}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(snake_case_)}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
__lowerCAmelCase =F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(snake_case_)}
""".split()
__lowerCAmelCase ="""
--do_predict
""".split()
__lowerCAmelCase =[]
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCAmelCase =get_gpu_count()
__lowerCAmelCase =get_torch_dist_unique_port()
__lowerCAmelCase =F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
__lowerCAmelCase =[sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case_ , env=self.get_env())
else:
__lowerCAmelCase =["""run_translation.py"""] + args
with patch.object(snake_case_ , """argv""" , snake_case_):
main()
return output_dir
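
# A back-of-the-envelope restatement of the memory delta asserted in the
# bnb test above: Adam keeps two fp32 moments (8 bytes) per parameter while
# the 8-bit optimizer keeps roughly 2 bytes, so ~25M quantizable parameters
# save about 150MB; the 120MB threshold leaves headroom for per-GPU
# variance. Nothing here is measured, the numbers restate the comment.
QUANTIZABLE_PARAMS = 25_000_000
ADAM_BYTES_PER_PARAM = 8   # two fp32 moments
BNB_BYTES_PER_PARAM = 2    # 8-bit moments

expected_saving_mb = QUANTIZABLE_PARAMS * (ADAM_BYTES_PER_PARAM - BNB_BYTES_PER_PARAM) / 2**20
print(f"expected optimizer-state saving: ~{expected_saving_mb:.0f}MB")  # ~143MB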
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = "gpt_neo"
SCREAMING_SNAKE_CASE = ["past_key_values"]
SCREAMING_SNAKE_CASE = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Optional[int] , snake_case_ : int=5_02_57 , snake_case_ : Tuple=20_48 , snake_case_ : Optional[int]=20_48 , snake_case_ : int=24 , snake_case_ : List[Any]=[[["global", "local"], 12]] , snake_case_ : Tuple=16 , snake_case_ : Optional[int]=None , snake_case_ : Dict=2_56 , snake_case_ : List[str]="gelu_new" , snake_case_ : List[Any]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : str=0.1 , snake_case_ : Union[str, Any]=1e-5 , snake_case_ : str=0.0_2 , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[int]=5_02_56 , snake_case_ : Tuple=5_02_56 , **snake_case_ : Optional[int] , )-> Tuple:
__lowerCAmelCase =vocab_size
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =hidden_size
__lowerCAmelCase =num_layers
__lowerCAmelCase =num_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =window_size
__lowerCAmelCase =activation_function
__lowerCAmelCase =resid_dropout
__lowerCAmelCase =embed_dropout
__lowerCAmelCase =attention_dropout
__lowerCAmelCase =classifier_dropout
__lowerCAmelCase =layer_norm_epsilon
__lowerCAmelCase =initializer_range
__lowerCAmelCase =use_cache
__lowerCAmelCase =bos_token_id
__lowerCAmelCase =eos_token_id
__lowerCAmelCase =attention_types
__lowerCAmelCase =self.expand_attention_types_params(snake_case_)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
F"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""")
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
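
# A concrete expansion of the default `attention_types` value used by this
# config: the two-entry cycle repeated 12 times yields the 24-layer pattern
# global, local, global, local, ... Standalone check with illustrative names.
def expand(attention_types):
    attentions = []
    for pattern, repeats in attention_types:
        attentions.extend(pattern * repeats)
    return attentions

layers = expand([[["global", "local"], 12]])
assert len(layers) == 24
assert layers[:4] == ["global", "local", "global", "local"]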
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of seq_length below window_size, and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
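
# A quick plain-Python check of the divisor helper above, assuming the torch
# version mirrors this logic: for a sequence of length 10 and a window size
# of 4, the largest divisor of 10 below 4 is 2, giving 5 blocks.
def block_length_and_num_blocks(seq_length, window_size):
    divisors = [d for d in range(1, window_size) if seq_length % d == 0]
    block_length = max(divisors)
    return block_length, seq_length // block_length

assert block_length_and_num_blocks(10, 4) == (2, 5)
assert block_length_and_num_blocks(12, 4) == (3, 4)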
class __a ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Union[str, Any])-> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""")
__lowerCAmelCase ={0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase ( self : Optional[int])-> int:
return self._config.num_heads
def UpperCamelCase ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
__lowerCAmelCase =super(snake_case_ , self).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_)
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
__lowerCAmelCase , __lowerCAmelCase =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase =seqlen + 2
__lowerCAmelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase =[
(torch.zeros(snake_case_), torch.zeros(snake_case_)) for _ in range(self.num_layers)
]
__lowerCAmelCase =common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase =ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_)] , dim=1)
return ordered_inputs
@property
def UpperCamelCase ( self : Union[str, Any])-> int:
return 13
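
# A sketch of the past_key_values geometry built by generate_dummy_inputs
# above: one (key, value) pair per layer, each of shape
# (batch, num_heads, past_sequence_length, head_dim). Numbers are
# illustrative, not tied to any particular checkpoint.
import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 16, 5, 128, 24
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(num_layers)
]
assert len(past_key_values) == num_layers
assert past_key_values[0][0].shape == (batch, num_heads, past_len, head_dim)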
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
SCREAMING_SNAKE_CASE_: Optional[Any] =datasets.utils.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Dict =['names', 'prefix']
SCREAMING_SNAKE_CASE_: int =['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
SCREAMING_SNAKE_CASE_: Union[str, Any] =['encoding_errors', 'on_bad_lines']
SCREAMING_SNAKE_CASE_: Tuple =['date_format']
@dataclass
class __A ( datasets.BuilderConfig ):
a__ : List[Any] = ""","""
a__ : Optional[Any] = None
a__ : Tuple = """infer"""
a__ : List[Any] = None
a__ : List[str] = None
a__ : Any = None
a__ : int = None
a__ : List[Any] = None
a__ : Optional[Any] = True
a__ : Tuple = None
a__ : int = None
a__ : Dict = None
a__ : int = None
a__ : List[str] = False
a__ : Optional[int] = None
a__ : Union[str, Any] = None
a__ : Optional[Any] = None
a__ : Union[str, Any] = True
a__ : Optional[Any] = True
a__ : str = False
a__ : int = True
a__ : Union[str, Any] = None
a__ : Union[str, Any] = """."""
a__ : List[str] = None
a__ : int = """\""""
a__ : Tuple = 0
a__ : Tuple = None
a__ : Union[str, Any] = None
a__ : List[Any] = None
a__ : Union[str, Any] = None
a__ : List[str] = True
a__ : Optional[int] = True
a__ : int = 0
a__ : Optional[int] = True
a__ : List[Any] = False
a__ : Dict = None
a__ : Tuple = 10_000
a__ : Union[str, Any] = None
a__ : str = """strict"""
a__ : Dict = """error"""
a__ : Dict = None
def _lowercase (self : Union[str, Any] ):
if self.delimiter is not None:
UpperCAmelCase_ = self.delimiter
if self.column_names is not None:
UpperCAmelCase_ = self.column_names
@property
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __a ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __A ( datasets.ArrowBasedBuilder ):
a__ : Any = CsvConfig
def _lowercase (self : str ):
return datasets.DatasetInfo(features=self.config.features )
def _lowercase (self : Tuple , __a : Dict ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(__a , __a ):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(__a ) for file in files]
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"files": files} ) )
return splits
def _lowercase (self : List[Any] , __a : int ):
if self.config.features is not None:
UpperCAmelCase_ = self.config.features.arrow_schema
if all(not require_storage_cast(__a ) for feature in self.config.features.values() ):
# cheaper cast
UpperCAmelCase_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__a )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(__a , __a )
return pa_table
def _lowercase (self : Union[str, Any] , __a : Any ):
UpperCAmelCase_ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
UpperCAmelCase_ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__a ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ):
UpperCAmelCase_ = pd.read_csv(__a , iterator=__a , dtype=__a , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__a ):
UpperCAmelCase_ = pa.Table.from_pandas(__a )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__a )
except ValueError as e:
logger.error(f"""Failed to read file \'{file}\' with error {type(__a )}: {e}""" )
raise
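
# The chunked read in _generate_tables above, condensed into a standalone
# sketch: pandas yields fixed-size DataFrame chunks and each chunk becomes
# an Arrow table, so large CSV files never sit fully in memory. The path
# and chunk size below are placeholders.
import pandas as pd
import pyarrow as pa

def iter_arrow_tables(csv_path, chunksize=10_000):
    for batch_idx, df in enumerate(pd.read_csv(csv_path, iterator=True, chunksize=chunksize)):
        yield batch_idx, pa.Table.from_pandas(df)

# for idx, table in iter_arrow_tables("data.csv"):
#     print(idx, table.num_rows)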
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_: Dict ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
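
# The lazy-module pattern above in miniature: under TYPE_CHECKING the names
# are imported eagerly for static analysis, while at runtime the module is
# replaced by a proxy that imports submodules on first attribute access.
# `_ToyLazyModule` is a toy stand-in, not the actual `_LazyModule`.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # e.g. {"configuration_x": ["XConfig"]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)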
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *snake_case__ , **snake_case__ ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *snake_case__ , **snake_case__ )
return wrapper
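
# How the guard above behaves, in a self-contained form: below the cutoff
# version the decorator is a no-op, otherwise the method is wrapped so a
# hook can run first. The fake hook and version strings are illustrative,
# this is not the accelerate API.
from packaging import version

def gated(installed, cutoff="0.17.0"):
    def decorator(method):
        if version.parse(installed) < version.parse(cutoff):
            return method
        def wrapper(self, *args, **kwargs):
            print("pre-forward hook would run here")
            return method(self, *args, **kwargs)
        return wrapper
    return decorator

class Demo:
    @gated("0.18.0")
    def forward(self, x):
        return x

assert Demo().forward(3) == 3  # prints the hook message first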
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path):
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Adds the strings curr_string + "0" and curr_string + "1" to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits):
    """Compresses the given bit string using the Lempel-Ziv-Welch scheme."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path, compressed):
    """Prepends the original file length, in self-delimiting binary form, to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path, to_write):
    """Writes the given bit string to the file, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path, destination_path):
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
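
# A concrete check of the header produced by add_file_length above: for a
# 5-byte source file, bin(5)[2:] == "101", so the header is two leading
# zeros followed by "101". The length - 1 leading zeros let a decoder find
# where the self-delimiting length field ends.
file_length_binary = bin(5)[2:]  # "101"
header = "0" * (len(file_length_binary) - 1) + file_length_binary
assert header == "00101"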
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] = False , ) -> str:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[Any] = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[str] = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : int = T5Config(
vocab_size=_SCREAMING_SNAKE_CASE , d_model=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE , feed_forward_proj=_SCREAMING_SNAKE_CASE , is_decoder=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = nn.ModuleList()
for lyr_num in range(_SCREAMING_SNAKE_CASE):
            __lowerCAmelCase : Dict = T5Block(_SCREAMING_SNAKE_CASE)
self.encoders.append(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Any = T5LayerNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Any = self.token_embedder(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = encoder_input_tokens.shape[1]
__lowerCAmelCase : Any = torch.arange(_SCREAMING_SNAKE_CASE , device=encoder_input_tokens.device)
x += self.position_encoding(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.dropout_pre(_SCREAMING_SNAKE_CASE)
        # invert the attention mask
__lowerCAmelCase : Optional[int] = encoder_input_tokens.size()
__lowerCAmelCase : Union[str, Any] = self.get_extended_attention_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
for lyr in self.encoders:
__lowerCAmelCase : Dict = lyr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : Tuple = self.layer_norm(_SCREAMING_SNAKE_CASE)
        return self.dropout_post(_SCREAMING_SNAKE_CASE), encoder_inputs_mask
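
# What get_extended_attention_mask does for the encoder above, in brief: a
# (batch, seq) mask of ones and zeros is broadcast to (batch, 1, 1, seq) and
# the zeros become a large negative additive bias. A minimal stand-alone
# equivalent, not the mixin's actual code.
import torch

def extend_attention_mask(mask, dtype=torch.float32):
    extended = mask[:, None, None, :].to(dtype)       # (batch, 1, 1, seq)
    return (1.0 - extended) * torch.finfo(dtype).min  # masked positions -> huge negative

m = extend_attention_mask(torch.tensor([[1, 1, 0]]))
assert m.shape == (1, 1, 1, 3)
assert m[0, 0, 0, 2] < -1e30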
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Any=0) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.random.RandomState(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.get_dummy_inputs()
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[str] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : str = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
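
# Illustrative helper (added; not part of the original test module). It factors
# out the prompt-embedding precomputation exercised by the two tests above,
# assuming an already-loaded OnnxStableDiffusionPipeline. The helper name is
# hypothetical.
def _encode_prompts_np(pipe, prompts):
    # Tokenize to the model's fixed max length, then run the ONNX text encoder
    # on int32 token ids and return the resulting embeddings.
    input_ids = pipe.tokenizer(
        prompts,
        padding="max_length",
        max_length=pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="np",
    )["input_ids"]
    return pipe.text_encoder(input_ids=input_ids.astype(np.int32))[0]
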
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
'''simple docstring'''
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implement the hyperbolic tangent via its closed form 2 / (1 + e^(-2x)) - 1.

    >>> tangent_hyperbolic(np.array([0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
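

# Added cross-check (illustrative): the closed form above, 2 / (1 + e^(-2x)) - 1,
# is algebraically equal to tanh(x), so it should agree with NumPy's built-in.
def _matches_numpy_tanh() -> bool:
    sample = np.linspace(-3.0, 3.0, 7)
    return bool(np.allclose(tangent_hyperbolic(sample), np.tanh(sample)))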
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
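

# Minimal usage sketch (added for illustration): with the defaults above, the
# convolutional feature extractor downsamples raw audio by the product of
# `conv_stride`, i.e. inputs_to_logits_ratio == 5 * 2**6 == 320.
if __name__ == "__main__":
    config = SEWConfig()
    print(config.model_type, config.inputs_to_logits_ratio)  # sew 320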
"""simple docstring"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer, e.g. "MMXXI" -> 2021.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral, e.g. 2021 -> "MMXXI".
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
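

# Tiny usage example (added for illustration; the exhaustive behavioural
# checks live in the test functions below).
def _demo_linked_list() -> str:
    lst = LinkedList()
    for value in (1, 2, 3):
        lst.insert_tail(value)
    lst.reverse()
    return repr(lst)  # "3->2->1"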
def test_singly_linked_list() -> None:
    """
    Exercise the basic LinkedList operations.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """
    Exercise the LinkedList with heterogeneous values.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = "Hello world! cécé herlolip"
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool ):
"""simple docstring"""
__UpperCAmelCase = FairseqRobertaModel.from_pretrained(UpperCamelCase__ )
roberta.eval() # disable dropout
__UpperCAmelCase = roberta.model.encoder.sentence_encoder
__UpperCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , UpperCamelCase__ )
__UpperCAmelCase = XLMRobertaXLForSequenceClassification(UpperCamelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(UpperCamelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCAmelCase = roberta_sent_encoder.embed_tokens.weight
__UpperCAmelCase = roberta_sent_encoder.embed_positions.weight
__UpperCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__UpperCAmelCase = roberta_sent_encoder.layer_norm.weight
__UpperCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCAmelCase = model.roberta.encoder.layer[i]
__UpperCAmelCase = roberta_sent_encoder.layers[i]
__UpperCAmelCase = layer.attention
__UpperCAmelCase = roberta_layer.self_attn_layer_norm.weight
__UpperCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
__UpperCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__UpperCAmelCase = roberta_layer.self_attn.q_proj.weight
__UpperCAmelCase = roberta_layer.self_attn.q_proj.bias
__UpperCAmelCase = roberta_layer.self_attn.k_proj.weight
__UpperCAmelCase = roberta_layer.self_attn.k_proj.bias
__UpperCAmelCase = roberta_layer.self_attn.v_proj.weight
__UpperCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
__UpperCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__UpperCAmelCase = roberta_layer.self_attn.out_proj.weight
__UpperCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__UpperCAmelCase = roberta_layer.final_layer_norm.weight
__UpperCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
__UpperCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__UpperCAmelCase = roberta_layer.fca.weight
__UpperCAmelCase = roberta_layer.fca.bias
# output
__UpperCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__UpperCAmelCase = roberta_layer.fca.weight
__UpperCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__UpperCAmelCase = roberta.model.encoder.lm_head.dense.weight
__UpperCAmelCase = roberta.model.encoder.lm_head.dense.bias
__UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
__UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
__UpperCAmelCase = roberta.model.encoder.lm_head.weight
__UpperCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCAmelCase = roberta.encode(UpperCamelCase__ ).unsqueeze(0 ) # batch of size 1
__UpperCAmelCase = model(UpperCamelCase__ )[0]
if classification_head:
__UpperCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(UpperCamelCase__ ) )
else:
__UpperCAmelCase = roberta.model(UpperCamelCase__ )[0]
print(our_output.shape , their_output.shape )
__UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__UpperCAmelCase = torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(UpperCamelCase__ ).mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
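# Example invocation (added for illustration; both paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head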
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowerCAmelCase ( UpperCamelCase__ : BertModel , UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
__UpperCAmelCase = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
__UpperCAmelCase = model.state_dict()
def to_tf_var_name(UpperCamelCase__ : str ):
for patt, repl in iter(UpperCamelCase__ ):
__UpperCAmelCase = name.replace(UpperCamelCase__ , UpperCamelCase__ )
return f"""bert/{name}"""
def create_tf_var(UpperCamelCase__ : np.ndarray , UpperCamelCase__ : str , UpperCamelCase__ : tf.Session ):
__UpperCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
__UpperCAmelCase = tf.get_variable(dtype=UpperCamelCase__ , shape=tensor.shape , name=UpperCamelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(UpperCamelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__UpperCAmelCase = to_tf_var_name(UpperCamelCase__ )
__UpperCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__UpperCAmelCase = torch_tensor.T
__UpperCAmelCase = create_tf_var(tensor=UpperCamelCase__ , name=UpperCamelCase__ , session=UpperCamelCase__ )
tf.keras.backend.set_value(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = session.run(UpperCamelCase__ )
print(f"""Successfully created {tf_name}: {np.allclose(UpperCamelCase__ , UpperCamelCase__ )}""" )
__UpperCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def lowerCAmelCase ( UpperCamelCase__ : List[str]=None ):
"""simple docstring"""
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Directory in which to save tensorflow model''' )
__UpperCAmelCase = parser.parse_args(UpperCamelCase__ )
__UpperCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=UpperCamelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
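# Example invocation (added for illustration; paths and model name are
# placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints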
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
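

# Illustrative usage (added): chaining the helpers above. The test-file path
# is a placeholder and requires running from the root of the transformers repo.
if __name__ == "__main__":
    bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    for model_class, tester_classes in get_model_to_tester_mapping(bert_test_file).items():
        print(model_class.__name__, [t.__name__ for t in tester_classes])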
"""simple docstring"""
from __future__ import annotations
class snake_case :
"""simple docstring"""
def __init__( self : Dict ,lowerCamelCase__ : list[list[int]] ):
UpperCAmelCase__ = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(lowerCamelCase__ ) != 0:
UpperCAmelCase__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase__ ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase__ ,(int, float) ):
raise error
UpperCAmelCase__ = rows
else:
UpperCAmelCase__ = []
def __lowerCAmelCase ( self : Union[str, Any] ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __lowerCAmelCase ( self : str ):
return len(self.rows )
@property
def __lowerCAmelCase ( self : List[Any] ):
return len(self.rows[0] )
@property
def __lowerCAmelCase ( self : Any ):
return (self.num_rows, self.num_columns)
@property
def __lowerCAmelCase ( self : Optional[int] ):
return self.order[0] == self.order[1]
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase__ )
def __lowerCAmelCase ( self : str ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def __lowerCAmelCase ( self : List[str] ):
return bool(self.determinant() )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
UpperCAmelCase__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase__ ).determinant()
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase__ ,lowerCamelCase__ )
return -1 * self.get_minor(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
return Matrix(
[
[self.get_minor(lowerCamelCase__ ,lowerCamelCase__ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __lowerCAmelCase ( self : int ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : List[str] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(lowerCamelCase__ ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : int | None = None ):
UpperCAmelCase__ = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase__ ,(int, float) ):
raise type_error
if len(lowerCamelCase__ ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(lowerCamelCase__ )
else:
UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : int | None = None ):
UpperCAmelCase__ = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase__ ,(int, float) ):
raise type_error
if len(lowerCamelCase__ ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : List[Any] ,lowerCamelCase__ : object ):
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any ,lowerCamelCase__ : object ):
return not self == other
def __neg__( self : Dict ):
return self * -1
def __add__( self : str ,lowerCamelCase__ : Matrix ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : List[str] ,lowerCamelCase__ : Matrix ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : List[str] ,lowerCamelCase__ : Matrix | int | float ):
if isinstance(lowerCamelCase__ ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase__ ,lowerCamelCase__ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : Optional[int] ,lowerCamelCase__ : int ):
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
UpperCAmelCase__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] ,lowerCamelCase__ : list[int] ,lowerCamelCase__ : list[int] ):
return sum(row[i] * column[i] for i in range(len(lowerCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
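# Usage note (added): in the upstream version of this class the anonymized
# methods above are named `columns`, `num_rows`, `num_columns`, `order`,
# `is_square`, `identity`, `determinant`, `is_invertable`, `get_minor`,
# `get_cofactor`, `minors`, `cofactors`, `adjugate`, `inverse`, `add_row`,
# `add_column` and the classmethod `dot_product`, e.g.:
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()  # -2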
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowercase ( __A : Union[str, Any] ) -> Any:
'''simple docstring'''
if "cls_token" in name:
snake_case : List[str] = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
snake_case : Dict = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
snake_case : List[str] = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
snake_case : Optional[Any] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case : Any = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case : Optional[int] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
snake_case : Optional[Any] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
snake_case : int = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
snake_case : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case : Tuple = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
snake_case : Union[str, Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
snake_case : Tuple = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
snake_case : Optional[Any] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
snake_case : List[Any] = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
snake_case : Optional[Any] = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def lowercase ( __A : Tuple , __A : Optional[int] ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case : Any = orig_state_dict.pop(__A )
if "qkv" in key:
snake_case : List[Any] = key.split(""".""" )
snake_case : int = int(key_split[1] )
if "decoder_blocks" in key:
snake_case : int = config.decoder_hidden_size
snake_case : Union[str, Any] = """decoder.decoder_layers."""
if "weight" in key:
snake_case : Optional[Any] = val[:dim, :]
snake_case : Tuple = val[dim : dim * 2, :]
snake_case : Optional[int] = val[-dim:, :]
elif "bias" in key:
snake_case : Union[str, Any] = val[:dim]
snake_case : int = val[dim : dim * 2]
snake_case : Optional[Any] = val[-dim:]
else:
snake_case : List[str] = config.hidden_size
snake_case : List[str] = """vit.encoder.layer."""
if "weight" in key:
snake_case : Any = val[:dim, :]
snake_case : int = val[dim : dim * 2, :]
snake_case : Union[str, Any] = val[-dim:, :]
elif "bias" in key:
snake_case : Optional[Any] = val[:dim]
snake_case : int = val[dim : dim * 2]
snake_case : Optional[int] = val[-dim:]
else:
snake_case : Optional[Any] = val
return orig_state_dict
def lowercase ( __A : Tuple , __A : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = ViTMAEConfig()
if "large" in checkpoint_url:
snake_case : List[str] = 1024
snake_case : Optional[int] = 4096
snake_case : Optional[int] = 24
snake_case : Tuple = 16
elif "huge" in checkpoint_url:
snake_case : Dict = 14
snake_case : int = 1280
snake_case : Dict = 5120
snake_case : List[str] = 32
snake_case : Optional[Any] = 16
snake_case : str = ViTMAEForPreTraining(__A )
snake_case : Optional[int] = torch.hub.load_state_dict_from_url(__A , map_location="""cpu""" )["""model"""]
snake_case : Any = ViTMAEImageProcessor(size=config.image_size )
snake_case : Tuple = convert_state_dict(__A , __A )
model.load_state_dict(__A )
model.eval()
snake_case : Tuple = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
snake_case : Union[str, Any] = Image.open(requests.get(__A , stream=__A ).raw )
snake_case : Dict = ViTMAEImageProcessor(size=config.image_size )
snake_case : str = image_processor(images=__A , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
snake_case : List[str] = model(**__A )
snake_case : str = outputs.logits
if "large" in checkpoint_url:
snake_case : str = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
snake_case : List[Any] = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
snake_case : Optional[int] = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase : Optional[Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
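# Example invocation (added for illustration; the default URL above points at
# the official MAE "base" visualization weights, the output path is a
# placeholder):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base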
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def UpperCamelCase ( self : List[str] ) -> str:
return 32
@property
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return 32
@property
def UpperCamelCase ( self : Optional[int] ) -> str:
return self.time_input_dim
@property
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : int ) -> List[Any]:
return 100
@property
def UpperCamelCase ( self : Any ) -> List[Any]:
lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def UpperCamelCase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCamelCase_ = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self : int ) -> List[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        lowerCamelCase_ = UNet2DConditionModel(**__SCREAMING_SNAKE_CASE)
return model
@property
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = self.dummy_tokenizer
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase_ = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict=0 ) -> str:
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ = Image.fromarray(np.uint8(__SCREAMING_SNAKE_CASE)).convert('RGB').resize((256, 256))
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self : Tuple ) -> Any:
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
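
    # Minimal end-to-end sketch (added; mirrors the integration test above and
    # assumes a CUDA device with enough memory for the fp16 checkpoints):
    #   prior = KandinskyPriorPipeline.from_pretrained(
    #       'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16
    #   ).to('cuda')
    #   img2img = KandinskyImg2ImgPipeline.from_pretrained(
    #       'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16
    #   ).to('cuda')
    #   emb, zero_emb = prior('a red cartoon frog, 4k').to_tuple()
    #   frame = img2img(
    #       'a red cartoon frog, 4k', image=init_image, image_embeds=emb,
    #       negative_image_embeds=zero_emb, strength=0.3, output_type='np',
    #   ).images[0]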
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        decoded_text = tokenizer.decode(input_ids)
        self.assertEqual(decoded_text, normalized_text)
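# A quick sketch of the round trip the test above checks, runnable outside the test
# harness (assumes jieba is installed and the "openbmb/cpm-ant-10b" checkpoint is
# reachable; the sample sentence is the one used in the test):
#
#   from transformers import CpmAntTokenizer
#
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   tokens = tokenizer.tokenize("今天天气真好!")       # jieba word segmentation
#   ids = tokenizer.convert_tokens_to_ids([tokenizer.bos_token] + tokens)
#   assert tokenizer.decode(ids) == "今天天气真好!"     # decode drops the BOS token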
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for HANS."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features for one HANS example."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[2], label_list[1] = label_list[1], label_list[2]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
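    # The __init__ above combines two standard patterns: an on-disk feature cache plus a
    # FileLock so that, under distributed training, only the first process tokenizes the
    # data while the others wait and read the cache. The same idea in isolation (a
    # sketch; `build()` stands in for any expensive preprocessing step):
    #
    #   from filelock import FileLock
    #   import os, torch
    #
    #   def cached(path, build, overwrite=False):
    #       with FileLock(path + ".lock"):          # serialize across processes
    #           if os.path.exists(path) and not overwrite:
    #               return torch.load(path)         # everyone else hits the cache
    #           artifact = build()                  # only one process pays this cost
    #           torch.save(artifact, path)
    #           return artifact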
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[2], label_list[1] = label_list[1], label_list[2]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts HANS examples into model-ready `InputFeatures`."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
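# A sketch of how the pieces above fit together when building a PyTorch dataset for
# HANS evaluation (the tokenizer checkpoint and data directory are placeholders):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   eval_dataset = HansDataset(
#       data_dir="/path/to/hans",       # must contain heuristics_evaluation_set.txt
#       tokenizer=tokenizer,
#       task="hans",
#       max_seq_length=128,
#       evaluate=True,
#   )
#   print(len(eval_dataset), eval_dataset.get_labels())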
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
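    # Note on the two sampling modes exercised above: passing explicit `timesteps=[22, 0]`
    # with `num_inference_steps=None` runs the multistep consistency sampler, while
    # `num_inference_steps=1` with `timesteps=None` does single-step generation. A sketch
    # of calling the pipeline directly in each mode (component setup as in the tests):
    #
    #   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
    #   multistep_image = pipe(batch_size=1, timesteps=[22, 0], output_type="np").images
    #   onestep_image = pipe(batch_size=1, num_inference_steps=1, output_type="np").images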
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
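# The `sdp_kernel` context manager used above selects which scaled-dot-product-attention
# backend PyTorch 2.x may dispatch to. A sketch of forcing the flash-attention kernel for
# an arbitrary attention call (newer PyTorch releases expose the same switch as
# `torch.nn.attention.sdpa_kernel`, so treat this spelling as version-dependent):
#
#   import torch
#   from torch.backends.cuda import sdp_kernel
#
#   q = k = v = torch.randn(1, 8, 128, 64, device="cuda", dtype=torch.float16)
#   with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
#       out = torch.nn.functional.scaled_dot_product_attention(q, k, v)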
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
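# `convert_torch_checkpoint` above is the standard "pop-and-rename" state-dict
# migration: every original key is mapped through `rename_key` and reinserted under its
# new name. A generic sketch of the same pattern for any key-mapping function:
#
#   def remap_state_dict(state_dict, remap):
#       return {remap(key): value for key, value in state_dict.items()}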
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
if "l1" in model_name:
lowercase : Any = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0] ,A ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowercase : List[Any] = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0] ,A ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowercase : Optional[int] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
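# Example invocation, assuming this file is saved as convert_efficientformer.py (the
# checkpoint and config paths are placeholders for a locally downloaded original
# EfficientFormer release):
#
#   python convert_efficientformer.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-300 \
#       --no-push_to_hub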
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2,
        intermediate_groups=4, output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
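# A sketch of using the same MNLI checkpoint end to end, with a tokenizer instead of
# hard-coded input ids (the premise/hypothesis pair is illustrative; label order comes
# from the model's own config):
#
#   from transformers import AutoTokenizer, SqueezeBertForSequenceClassification
#
#   tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#   inputs = tokenizer("A soccer game is happening.", "Some people are playing a sport.", return_tensors="pt")
#   logits = model(**inputs).logits
#   prediction = model.config.id2label[logits.argmax(-1).item()]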
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
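# The pipeline assembled above chains three stages: a CLIP image encoder produces image
# embeddings, the decoder UNet generates a small base image conditioned on them, and the
# two super-resolution UNets upscale it (to 256x256 for the Karlo checkpoint). A minimal
# usage sketch (checkpoint as in the integration test; the input path is a placeholder):
#
#   from diffusers import UnCLIPImageVariationPipeline
#   from diffusers.utils import load_image
#   import torch
#
#   pipe = UnCLIPImageVariationPipeline.from_pretrained(
#       "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
#   ).to("cuda")
#   variation = pipe(load_image("cat.png")).images[0]  # placeholder input image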
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
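# A sketch of feeding extracted nodes/xpaths into the higher-level MarkupLM processor
# for downstream tasks (the checkpoint name is the public one; treat the exact
# processor API as version-dependent):
#
#   from transformers import MarkupLMProcessor
#
#   processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
#   html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
#   encoding = processor(html, return_tensors="pt")  # runs feature extractor + tokenizer
#   print(encoding.input_ids.shape)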
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
        use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks, depths=self.depths,
            hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
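# Quick sanity check on the expected shapes asserted above: with image_size=64 and
# downsampling_rates=[1, 4, 8, 16], the encoder's final feature map has spatial size
# image_size // (downsampling_rates[-1] * 2) = 64 // 32 = 2 per side, while the semantic
# segmentation head emits logits at image_size // 4 = 16 per side.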
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
__SCREAMING_SNAKE_CASE = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first attentions (first block, first layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 32) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
self.assertEqual(out_len + 1 ,len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first attentions (first block, first layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ):
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ):
continue
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = SegformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase ,atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase ,atol=1E-1 ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
__SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase ,target_sizes=[(500, 300)] )
__SCREAMING_SNAKE_CASE = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,lowerCamelCase )
| 109 | 0 |
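# --- Illustrative sketch (not part of the test file above): how the expected
# attention shapes asserted in the attention test are derived. SegFormer's
# efficient self-attention queries one token per stride-by-stride patch and
# spatially reduces the keys/values by ``sr_ratio``. The hyperparameter values
# below are hypothetical examples, not the tester's actual defaults.
def expected_attention_shape(image_size: int, stride: int, sr_ratio: int, num_heads: int) -> tuple:
    seq_len = (image_size // stride) ** 2
    reduced_seq_len = (image_size // (stride * sr_ratio)) ** 2
    return (num_heads, seq_len, reduced_seq_len)

# first block (stride 4) and last block (stride 32), mirroring the assertions above
assert expected_attention_shape(64, 4, 8, 1) == (1, 256, 4)
assert expected_attention_shape(64, 32, 1, 8) == (8, 4, 4)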
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
while row < size and col < size:
        # pivoting
        pivot_row = max(
            (abs(augmented[rowa][col]), rowa) for rowa in range(row, size)
        )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]
            for rowa in range(row + 1, size):
                ratio = augmented[rowa][col] / augmented[row][col]
                augmented[rowa][col] = 0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size = len(y_list)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )
    return interpolated_func
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721 |
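# --- Quick sanity check for the Gaussian-elimination ``solve`` defined above
# (a sketch; run it in the same module). The system x + y = 3, x - y = 1 has
# the unique solution x = 2, y = 1.
coefficients = [[1.0, 1.0], [1.0, -1.0]]
constants = [[3.0], [1.0]]
print(solve(coefficients, constants))  # expected: [[2.0], [1.0]]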
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    '''simple docstring'''
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 0 |
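# --- Minimal sketch of the accelerate pattern used in the script above:
# build objects, hand them to ``Accelerator.prepare`` once, then train as
# usual with ``accelerator.backward``. The tiny model and random data are
# hypothetical stand-ins for the pets example.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

def tiny_training_loop():
    accelerator = Accelerator(cpu=True)
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = TensorDataset(torch.randn(24, 4), torch.randint(0, 2, (24,)))
    loader = DataLoader(dataset, batch_size=8)
    model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    model.train()
    for inputs, labels in loader:
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        accelerator.backward(loss)  # replaces loss.backward()
        optimizer.step()
        optimizer.zero_grad()

if __name__ == "__main__":
    tiny_training_loop()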
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_: int = logging.get_logger(__name__)
A_: Any = {"""vocab_file""": """spiece.model"""}
A_: str = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
A_: Union[str, Any] = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
A_: List[str] = """▁"""
class _lowercase(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="[CLS]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="<unk>" , UpperCAmelCase="[SEP]" , UpperCAmelCase="<pad>" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
_lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowercase = do_lower_case
_lowercase = remove_space
_lowercase = keep_accents
_lowercase = vocab_file
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowercase = self.__dict__.copy()
_lowercase = None
return state
def __setstate__( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowercase = {}
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
_lowercase = ''' '''.join(inputs.strip().split() )
else:
_lowercase = inputs
_lowercase = outputs.replace("""``""" , """\"""" ).replace("""\'\'""" , """\"""" )
if not self.keep_accents:
_lowercase = unicodedata.normalize("""NFKD""" , __lowerCAmelCase )
_lowercase = ''''''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
_lowercase = outputs.lower()
return outputs
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.preprocess_text(__lowerCAmelCase )
_lowercase = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
_lowercase = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowercase = cur_pieces[1:]
else:
_lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__lowerCAmelCase )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__lowerCAmelCase )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = []
_lowercase = ''''''
_lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowercase = True
_lowercase = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowercase = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
_lowercase = [self.sep_token_id]
_lowercase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
_lowercase = [self.sep_token_id]
_lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowercase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , """wb""" ) as fi:
_lowercase = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 398 |
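# --- Illustrative usage of the SentencePiece tokenizer above, assuming network
# access to the standard public checkpoint; outputs are not asserted here.
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
# digit/comma pieces are re-split by the custom _tokenize logic above
print(tok.tokenize("In 2023, prices rose 1,000%"))
# encode() adds [CLS] ... [SEP] via build_inputs_with_special_tokens
print(tok.encode("Hello world"))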
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0 | 0 |
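# --- Minimal round-trip sketch of the reader/writer exercised above.
# The file name is hypothetical; run inside a scratch directory.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "roundtrip.parquet").write() > 0
reloaded = ParquetDatasetReader("roundtrip.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]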
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self ):
super().setUp()
# fmt: off
UpperCAmelCase__ : int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
UpperCAmelCase__ : Tuple = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
UpperCAmelCase__ : int = {'unk_token': '<unk>'}
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Tuple = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
UpperCAmelCase__ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Any = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ : int = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
UpperCAmelCase__ : str = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : List[str] = 'こんにちは、世界。 こんばんは、㔺界。'
UpperCAmelCase__ : int = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase__ : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase__ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
UpperCAmelCase__ : List[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
UpperCAmelCase__ : List[str] = tokenizer.encode(_UpperCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCAmelCase__ : Union[str, Any] = 'こんにちは、世界。'
UpperCAmelCase__ : Any = 'こんばんは、㔺界。😀'
UpperCAmelCase__ : List[str] = 'こんにちは、世界。こんばんは、世界。😀'
UpperCAmelCase__ : Any = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
UpperCAmelCase__ : List[Any] = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCAmelCase__ : Optional[int] = 'こんにちは、世界。'
UpperCAmelCase__ : List[str] = 'こんばんは、㔺界。😀'
UpperCAmelCase__ : Optional[Any] = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ : Dict = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ : int = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ : Any = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ : Any = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : Optional[Any] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : Dict = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : Dict = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCAmelCase__ : int = tokenizer.encode('''あンいワ''' )
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
UpperCAmelCase__ : List[Any] = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCAmelCase__ : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
UpperCAmelCase__ : Optional[int] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
UpperCAmelCase__ : str = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
# fmt: off
UpperCAmelCase__ : str = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
UpperCAmelCase__ : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
def lowerCamelCase ( self ):
pass
def lowerCamelCase ( self ):
        pass
| 704 |
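# --- Sketch of the prefix-LM convention the token_type_ids test above checks:
# the prefix (plus the leading separator slot) is marked 1 and the generated
# suffix 0. Lengths below are hypothetical; the arithmetic mirrors the test.
def prefix_lm_token_types(len_prefix: int, len_text: int) -> list:
    return [1] + [1] * len_prefix + [0] * (len_text + 1)

assert prefix_lm_token_types(2, 3) == [1, 1, 1, 0, 0, 0, 0]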
'''simple docstring'''
def lowerCAmelCase__(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''')
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 599 | 0 |
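# --- Worked example for the NPV helper above: an initial outlay of 100
# followed by two inflows of 60, discounted at 10%.
# NPV = -100 + 60 / 1.1 + 60 / 1.1**2 ≈ 4.13
print(lowerCAmelCase__(0.10, [-100.0, 60.0, 60.0]))  # 4.13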
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__magic_name__ = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __UpperCAmelCase ( self : List[Any] ):
lowerCamelCase__ = 0
def __UpperCAmelCase ( self : Optional[Any] ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __UpperCAmelCase ( self : str ):
lowerCamelCase__ = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Optional[int] ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Tuple ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
lowerCamelCase__ = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , """fake-roberta""" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(type(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : List[str] ):
try:
AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE_ )
# Wrong model type will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoConfig.register("""model""" , SCREAMING_SNAKE_CASE_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
AutoConfig.register("""bert""" , SCREAMING_SNAKE_CASE_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCAmelCase ( self : Dict ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase__ = AutoConfig.from_pretrained("""bert-base""" )
def __UpperCAmelCase ( self : Optional[Any] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="""aaaaaa""" )
def __UpperCAmelCase ( self : List[Any] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __UpperCAmelCase ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __UpperCAmelCase ( self : Optional[int] ):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"
try:
            AutoConfig.register("""new-model""" , NewModelConfigLocal )
# If remote code is not set, the default is to use local
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCamelCase__ = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 129 |
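# --- Minimal sketch of the registration pattern the tests above exercise.
# ``ToyConfig`` is hypothetical; the cleanup mirrors the tests' finally blocks.
from transformers import AutoConfig, PretrainedConfig
from transformers.models.auto.configuration_auto import CONFIG_MAPPING

class ToyConfig(PretrainedConfig):
    model_type = "toy"

try:
    AutoConfig.register("toy", ToyConfig)
    assert isinstance(AutoConfig.for_model("toy"), ToyConfig)
finally:
    CONFIG_MAPPING._extra_content.pop("toy", None)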
"""simple docstring"""
def _A(num_a: int, num_b: int) -> bool:
    """simple docstring"""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129 | 1 |
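# --- Why the XOR test works: two's-complement sign bits differ exactly when
# the operands have opposite signs, so ``a ^ b < 0`` iff exactly one operand
# is negative (Python ints act as if infinitely sign-extended).
for a, b, expected in [(3, 5, False), (-3, 5, True), (3, -5, True), (-3, -5, False)]:
    assert ((a ^ b) < 0) == expected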
"""simple docstring"""
def _lowerCAmelCase(n: int = 1_0_0) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 707 |
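# --- Brute-force cross-check of the two closed forms used above (Project
# Euler 6; the known answer for n = 100 is 25164150).
n = 100
brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
closed = int((n * (n + 1) / 2) ** 2 - n * (n + 1) * (2 * n + 1) / 6)
assert brute == closed == 25164150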
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
def _snake_case ( self : Dict ) -> Dict:
return 3_2
@property
def _snake_case ( self : Tuple ) -> Tuple:
return 3_2
@property
def _snake_case ( self : Dict ) -> Optional[int]:
return self.time_input_dim
@property
def _snake_case ( self : List[Any] ) -> str:
return self.time_input_dim * 4
@property
def _snake_case ( self : List[str] ) -> List[Any]:
return 1_0_0
@property
def _snake_case ( self : List[str] ) -> Tuple:
__magic_name__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _snake_case ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__lowerCamelCase )
@property
def _snake_case ( self : List[str] ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__magic_name__ = PriorTransformer(**__lowerCamelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__magic_name__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__magic_name__ = CLIPVisionModelWithProjection(__lowerCamelCase )
return model
@property
def _snake_case ( self : Union[str, Any] ) -> int:
__magic_name__ = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def _snake_case ( self : List[Any] ) -> Optional[Any]:
__magic_name__ = self.dummy_prior
__magic_name__ = self.dummy_image_encoder
__magic_name__ = self.dummy_text_encoder
__magic_name__ = self.dummy_tokenizer
__magic_name__ = self.dummy_image_processor
__magic_name__ = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=__lowerCamelCase , clip_sample_range=10.0 , )
__magic_name__ = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _snake_case ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : str=0 ) -> Optional[int]:
if str(__lowerCamelCase ).startswith("mps" ):
__magic_name__ = torch.manual_seed(__lowerCamelCase )
else:
__magic_name__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__magic_name__ = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _snake_case ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ = "cpu"
__magic_name__ = self.get_dummy_components()
__magic_name__ = self.pipeline_class(**__lowerCamelCase )
__magic_name__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__magic_name__ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
__magic_name__ = output.image_embeds
__magic_name__ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
__magic_name__ = image[0, -1_0:]
__magic_name__ = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__magic_name__ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _snake_case ( self : Dict ) -> List[str]:
__magic_name__ = torch_device == "cpu"
__magic_name__ = True
__magic_name__ = False
self._test_inference_batch_single_identical(
test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
@skip_mps
def _snake_case ( self : Tuple ) -> Tuple:
__magic_name__ = torch_device == "cpu"
__magic_name__ = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
| 468 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class _A ( __lowercase ):
__a = """markuplm"""
def __init__( self , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=216 , _SCREAMING_SNAKE_CASE=1001 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
# additional properties
_UpperCAmelCase = max_depth
_UpperCAmelCase = max_xpath_tag_unit_embeddings
_UpperCAmelCase = max_xpath_subs_unit_embeddings
_UpperCAmelCase = tag_pad_id
_UpperCAmelCase = subs_pad_id
_UpperCAmelCase = xpath_unit_hidden_size | 518 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a = "facebook/wmt19-en-de"
a = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
a = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
a = tokenizer(["Making tiny model"], return_tensors="pt")
a = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
a = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
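# A quick reload sketch (not part of the original script; the variable names below are
# illustrative): load the tiny checkpoint saved above and run a generation smoke test.
tiny_tok = FSMTTokenizer.from_pretrained(mname_tiny)
tiny = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print(tiny.generate(**tiny_tok(["Making tiny model"], return_tensors="pt")))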
# Upload
# transformers-cli upload tiny-wmt19-en-de | 518 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
UpperCAmelCase_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Tuple , lowerCamelCase :Dict ) -> Dict:
UpperCAmelCase__ = AudioClassificationPipeline(model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
# test with a raw waveform
UpperCAmelCase__ = np.zeros((3_4000,) )
UpperCAmelCase__ = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :List[Any] , lowerCamelCase :Dict ) -> Any:
UpperCAmelCase__ , UpperCAmelCase__ = examples
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ )
# by default a model is initialized with num_labels=2
self.assertEqual(
UpperCamelCase_ , [
{"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )},
{"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )},
] , )
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ , top_k=1 )
self.assertEqual(
UpperCamelCase_ , [
{"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )},
] , )
self.run_torchaudio(UpperCamelCase_ )
@require_torchaudio
def UpperCAmelCase_ ( self :int , lowerCamelCase :Optional[Any] ) -> List[str]:
import datasets
        # test with an audio array loaded from a dataset
UpperCAmelCase__ = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
UpperCAmelCase__ = dataset[0]["audio"]["array"]
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )},
{"score": ANY(UpperCamelCase_ ), "label": ANY(UpperCamelCase_ )},
] , )
@require_torch
def UpperCAmelCase_ ( self :List[Any] ) -> List[str]:
UpperCAmelCase__ = "anton-l/wav2vec2-random-tiny-classifier"
UpperCAmelCase__ = pipeline("audio-classification" , model=UpperCamelCase_ )
UpperCAmelCase__ = np.ones((8000,) )
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ , top_k=4 )
UpperCAmelCase__ = [
{"score": 0.08_42, "label": "no"},
{"score": 0.08_38, "label": "up"},
{"score": 0.08_37, "label": "go"},
{"score": 0.08_34, "label": "right"},
]
UpperCAmelCase__ = [
{"score": 0.08_45, "label": "stop"},
{"score": 0.08_44, "label": "on"},
{"score": 0.08_41, "label": "right"},
{"score": 0.08_34, "label": "left"},
]
self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCAmelCase__ = {"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ , top_k=4 )
self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def UpperCAmelCase_ ( self :str ) -> List[Any]:
import datasets
UpperCAmelCase__ = "superb/wav2vec2-base-superb-ks"
UpperCAmelCase__ = pipeline("audio-classification" , model=UpperCamelCase_ )
UpperCAmelCase__ = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
UpperCAmelCase__ = np.array(dataset[3]["speech"] , dtype=np.floataa )
UpperCAmelCase__ = audio_classifier(UpperCamelCase_ , top_k=4 )
self.assertEqual(
nested_simplify(UpperCamelCase_ , decimals=3 ) , [
{"score": 0.9_81, "label": "go"},
{"score": 0.0_07, "label": "up"},
{"score": 0.0_06, "label": "_unknown_"},
{"score": 0.0_01, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def UpperCAmelCase_ ( self :Tuple ) -> Optional[Any]:
pass
| 714 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCamelCase ( lowerCAmelCase ):
# to overwrite at feature extractactor specific tests
UpperCAmelCase_ = None
UpperCAmelCase_ = None
@property
def UpperCAmelCase_ ( self :int ) -> int:
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self :Any ) -> str:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(lowerCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(lowerCamelCase , "padding_value" ) )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase_ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCAmelCase_ ( self :int , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Dict , lowerCamelCase :Optional[Any] ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = self.feat_extract_tester.seq_length_diff
UpperCAmelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase__ = self.feat_extract_tester.min_seq_length
UpperCAmelCase__ = self.feat_extract_tester.batch_size
UpperCAmelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" )[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Any ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Optional[int] , lowerCamelCase :str ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to smallest with np
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to middle
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" , truncation=lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = 12
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
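        # e.g. an input of length 25 with pad_to_multiple_of=12 rounds up to an expected_length of 36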
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
def UpperCAmelCase_ ( self :int ) -> List[str]:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :List[Any] ) -> int:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> str:
self._check_truncation(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> str:
self._check_truncation(numpify=lowerCamelCase )
@require_torch
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCAmelCase_ ( self :List[str] ) -> str:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = min(lowerCamelCase )
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 364 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 322 |
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
def count_of_possible_combinations(UpperCAmelCase_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(UpperCAmelCase_ )
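# e.g. with array=[1, 2, 5] and target=5 all three implementations return 9, counting
# ordered combinations: (5), three orderings of (1, 2, 2), four orderings of (1, 1, 1, 2),
# and (1, 1, 1, 1, 1).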
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowercase : str = sum(
count_of_possible_combinations_with_dp_array(target - item , UpperCAmelCase_ )
for item in array )
_lowercase : Optional[Any] = answer
return answer
_lowercase : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = [0] * (target + 1)
_lowercase : Dict = 1
for i in range(1 , target + 1 ):
for j in range(UpperCAmelCase_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = 3
UpperCamelCase__ = 5
UpperCamelCase__ = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 322 | 1 |
import sys
__lowercase : List[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( _lowerCamelCase : str ):
lowerCamelCase_ = 1
for digit in s:
product *= int(_lowerCamelCase )
return product
def lowerCamelCase_ ( _lowerCamelCase : str = N ):
lowerCamelCase_ = -sys.maxsize - 1
lowerCamelCase_ = n[:1_3]
lowerCamelCase_ = 1_3
while cur_index < len(_lowerCamelCase ) - 1_3:
if int(n[cur_index] ) >= int(substr[0] ):
lowerCamelCase_ = substr[1:] + n[cur_index]
cur_index += 1
else:
lowerCamelCase_ = max(_lowerCamelCase , str_eval(_lowerCamelCase ) )
lowerCamelCase_ = n[cur_index : cur_index + 1_3]
cur_index += 1_3
return largest_product
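# For the 1000-digit number above with a window of 13 digits, solution() returns 23514624000.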
if __name__ == "__main__":
print(f'''{solution() = }''') | 702 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''a''', '''b''', '''c'''] )
        # Because the floating-point scores are so close, we run into rounding error and the
        # order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(UpperCamelCase__ ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
] , )
@require_tf
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
] , )
@slow
@require_torch
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , ) | 66 | 0 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, and pi,
# all used by the function below
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0_54_57_18_17E-34 # unit of ℏ : J * s
__SCREAMING_SNAKE_CASE : Dict = 3E8 # unit of c : m * s^-1
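# The helper below rearranges the ideal parallel-plate Casimir relation,
#   F = (h_bar * c * pi**2 * A) / (240 * d**4),
# to solve for whichever one of force, area, or distance is passed in as 0.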
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_snake_case : str = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_snake_case : Tuple = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_snake_case : str = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
snake_case : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
snake_case : List[str] = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def A ( __snake_case: Optional[Any] , __snake_case: Tuple , __snake_case: Any ) -> int:
"""simple docstring"""
__magic_name__ = SavedModel()
__magic_name__ = []
with open(os.path.join(__snake_case , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__magic_name__ = json.load(__snake_case )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__snake_case )] )
with open(__snake_case , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__magic_name__ = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__magic_name__ = sorted(__snake_case )
__magic_name__ = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__snake_case )
if strict and len(__snake_case ) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
elif len(__snake_case ) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""" )
print(*__snake_case , sep='\n' )
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
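# Example invocation (the script filename here is assumed; the flags match the parser below):
#   python check_saved_model_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict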
if __name__ == "__main__":
snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
snake_case : Any = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 545 | 0 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __magic_name__ ( __UpperCAmelCase = "isbn/0140328726" ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
__SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid"""
raise ValueError(snake_case__ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
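# e.g. get_openlibrary_data("isbn/0140328726") (the default above) returns the raw JSON
# record for that ISBN; any other valid olid, such as an author key, works the same way.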
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
__SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__SCREAMING_SNAKE_CASE = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
__SCREAMING_SNAKE_CASE = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
__SCREAMING_SNAKE_CASE = """, """.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
a = input("\nEnter the ISBN code to search (or \'quit\' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
a = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 721 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 0 |
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =len(__a )
lowerCamelCase__: Dict =len(__a )
lowerCamelCase__: Any =(
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCamelCase__: List[str] =[]
for char_count in range(__a ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__a )
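# e.g. alternative_string_arrange("AB", "XYZ") returns "AXBYZ" (the call printed below)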
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 59 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Dict = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121 | 0 |
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowercase_ : List[str] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowercase_ : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCAmelCase__ , 1 ):
if n < _p:
# then we have our last prime to check
lowercase_ : Any = primes[:idx]
break
    lowercase_ , lowercase_ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
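    # e.g. for n = 221: n - 1 = 220 = 55 * 2**2, giving d = 55 and s = 2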
for prime in plist:
lowercase_ : Optional[int] = False
for r in range(UpperCAmelCase__ ):
lowercase_ : Any = pow(UpperCAmelCase__ , d * 2**r , UpperCAmelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase_ : List[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to True,
        # and n MUST be composite
return False
return True
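# e.g. miller_rabin(561) is False (561 = 3 * 11 * 17 is a Carmichael number) while
# miller_rabin(563) is True, matching the first two assertions in the self-test below.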
def lowerCamelCase ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 707 | '''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 | 0 |
"""simple docstring"""
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set({"""(""", """[""", """{"""} )
SCREAMING_SNAKE_CASE__ = set({""")""", """]""", """}"""} )
SCREAMING_SNAKE_CASE__ = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(snake_case__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(snake_case__ ) == 0 or (len(snake_case__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(snake_case__ ) == 0
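# e.g. is_balanced("([]{})") is True, while is_balanced("([)]") is False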
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = input("""Enter sequence of brackets: """ )
if is_balanced(snake_case__ ):
print(snake_case__ , """is balanced""" )
else:
print(snake_case__ , """is not balanced""" )
if __name__ == "__main__":
main()
| 196 |
"""simple docstring"""
from __future__ import annotations
def A ( snake_case__ ):
'''simple docstring'''
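    # preprocessing the first row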
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(snake_case__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(snake_case__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
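# e.g. [[1, 3, 1], [1, 5, 1], [4, 2, 1]] yields a minimal path cost of 7 (1 + 3 + 1 + 1 + 1)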
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int]=1_4 , UpperCamelCase : int=7 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Dict=True , UpperCamelCase : str=True , UpperCamelCase : List[Any]=True , UpperCamelCase : str=9_9 , UpperCamelCase : List[str]=3_2 , UpperCamelCase : Any=5 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Any=3_7 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[int]=5_1_2 , UpperCamelCase : List[Any]=1_6 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Union[str, Any]=0.0_2 , UpperCamelCase : Tuple=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , )->Dict:
__SCREAMING_SNAKE_CASE : List[str] = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : Tuple = seq_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : int = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_input_mask
__SCREAMING_SNAKE_CASE : List[str] = use_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = use_mc_token_ids
__SCREAMING_SNAKE_CASE : Tuple = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = max_position_embeddings
__SCREAMING_SNAKE_CASE : str = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
__SCREAMING_SNAKE_CASE : str = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
def __snake_case ( self : str )->Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_mc_token_ids:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : List[str] = self.get_config()
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : List[str] )->List[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
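
# A note on the structure above: the model tester is a plain helper object rather than a
# TestCase; the unittest classes below hold a reference to it and delegate the heavy lifting,
# so one config/inputs factory can drive many independent checks. A minimal, hedged sketch of
# the wiring (names follow this file; not meant to run standalone):
#
#     tester = CTRLModelTester(parent=some_test_case)
#     config_and_inputs = tester.prepare_config_and_inputs()
#     tester.create_and_check_ctrl_model(*config_and_inputs)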
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : Optional[int] )->Any:
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def __snake_case ( self : List[Any] )->List[str]:
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
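
# Hedged usage sketch (not part of the test suite): how the greedy-generation integration
# test above maps to end-user code. Assumes network access to the "ctrl" checkpoint; the
# prompt string mirrors the token ids hard-coded in the test.
if __name__ == "__main__":
    from transformers import CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    prompt = tokenizer("Legal the president is", return_tensors="pt").input_ids
    generated = model.generate(prompt, do_sample=False)  # greedy decoding, as in the test
    print(tokenizer.decode(generated[0]))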
| 447 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS data set."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            # dtype reconstruction: the original `tf.int*` widths were garbled in this copy;
            # int32 features with an int64 label is the assumed layout.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """
    Loads a list of ``InputExample``s into a list of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
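
# Hedged usage sketch (not part of the module): building the PyTorch HANS dataset, assuming
# torch is installed and the HANS txt files have been downloaded into `./hans`.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
    print(len(dataset), dataset.get_labels())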
| 447 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
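
# Note: `_LazyModule` defers the heavy `modeling_autoformer` import until an attribute is
# first accessed, so importing the package stays cheap. A hedged sketch of the observable
# behaviour (assumes torch is installed; not executed here):
#
#     from transformers.models.autoformer import AutoformerConfig  # cheap, config only
#     from transformers.models.autoformer import AutoformerModel   # first access triggers the torch import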
| 40 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
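
    # For reference: pyctcdecode's `decode_beams` returns a list of beams, each a tuple laid
    # out as (text, last_lm_state, text_frames, logit_score, lm_score) — hence the [0], [-2]
    # and [-1] indexing above. (Layout as documented by pyctcdecode; treat as an assumption
    # if the pinned version differs.)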
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
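
# Hedged usage sketch (not part of the test suite): batch decoding with the LM-boosted
# processor and a multiprocessing pool, mirroring the pool-after-processor ordering the
# tests above rely on. Assumes pyctcdecode/kenlm are installed and the hub checkpoint is
# reachable; the logits are random placeholders, not real model output.
if __name__ == "__main__":
    from multiprocessing import get_context

    import numpy as np

    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy logits
    with get_context("fork").Pool() as pool:  # pool must be created after the processor
        out = processor.batch_decode(logits, pool)
    print(out.text)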
| 96 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
        use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
def _snake_case ( self: Any ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Any = model_class(a )
__lowerCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple = [*signature.parameters.keys()]
__lowerCamelCase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Dict = True
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = False
__lowerCamelCase : Dict = True
__lowerCamelCase : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : int = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : Optional[Any] = outputs.attentions
__lowerCamelCase : Dict = sum(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase : int = True
__lowerCamelCase : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : Any = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase : List[str] = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__lowerCamelCase : str = (self.model_tester.image_size // 32) ** 2
__lowerCamelCase : Tuple = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__lowerCamelCase : Dict = len(a )
# Check attention is always last and order is fine
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Any = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Dict = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
__lowerCamelCase : Any = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase : Any = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _snake_case ( self: Optional[int] ):
def check_hidden_states_output(a: Optional[Any] , a: List[Any] , a: Tuple ):
__lowerCamelCase : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : str = outputs.hidden_states
__lowerCamelCase : Any = self.model_tester.num_encoder_blocks
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
def _snake_case ( self: Optional[Any] ):
if not self.model_tester.is_training:
return
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(a ):
continue
__lowerCamelCase : Union[str, Any] = model_class(a )
model.to(a )
model.train()
__lowerCamelCase : int = self._prepare_for_class(a , a , return_labels=a )
__lowerCamelCase : Tuple = model(**a ).loss
loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_parallelism(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self: str ):
# only resize + normalize
__lowerCamelCase : int = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
a )
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Tuple = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : Dict = model(a )
__lowerCamelCase : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : int = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-4 ) )
@slow
def _snake_case ( self: str ):
# only resize + normalize
__lowerCamelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : Any = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(a )
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : List[str] = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Any = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : List[Any] = model(a )
__lowerCamelCase : List[str] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : Dict = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-1 ) )
@slow
def _snake_case ( self: Any ):
# only resize + normalize
__lowerCamelCase : Any = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase : int = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
a )
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : Dict = image_processor(images=a , return_tensors='pt' )
__lowerCamelCase : Any = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(a )
__lowerCamelCase : Any = outputs.logits.detach().cpu()
__lowerCamelCase : str = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(500, 300)] )
__lowerCamelCase : str = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , a )
__lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=a )
__lowerCamelCase : Tuple = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , a )
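
# Hedged usage sketch (not part of the test suite): end-to-end semantic segmentation with
# one of the checkpoints exercised above. Assumes network access and a vision install; the
# target size is derived from the PIL image, so no hard-coded resolution.
if __name__ == "__main__":
    image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # resize logits back to the input resolution and take the per-pixel argmax
    segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(segmentation.shape)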
| 230 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _snake_case ( self: str ):
__lowerCamelCase : int = '<s>'
__lowerCamelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(a ) , 1004 )
def _snake_case ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self: Dict ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__lowerCamelCase : str = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : List[Any] = tokenizer.tokenize(a )
__lowerCamelCase : List[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase : str = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase : str = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
__lowerCamelCase : Tuple = tokenizer.encode(a )
__lowerCamelCase : str = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Tuple = BigBirdTokenizer(a , keep_accents=a )
__lowerCamelCase : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , )
__lowerCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__lowerCamelCase : int = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowerCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _snake_case ( self: Dict ):
__lowerCamelCase : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
__lowerCamelCase : int = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def _snake_case ( self: List[Any] ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__lowerCamelCase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowerCamelCase : int = ' '.join(a )
__lowerCamelCase : List[str] = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a )
__lowerCamelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a )
__lowerCamelCase : Optional[int] = BigBirdConfig(attention_type='original_full' )
__lowerCamelCase : Any = BigBirdModel(a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
__lowerCamelCase : Union[str, Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def _snake_case ( self: List[Any] ):
# fmt: off
__lowerCamelCase : Optional[Any] = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
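
# Hedged usage sketch (not part of the test suite): a round-trip with the public BigBird
# checkpoint used in the slow tests above (requires network access).
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("Paris is the [MASK].").input_ids
    print(ids)
    print(tokenizer.decode(ids))  # expected: "[CLS] Paris is the[MASK].[SEP]"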
| 230 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
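
An illustrative check, not part of the original file, that the shim above really behaves like `FlavaImageProcessor` while emitting the deprecation warning; the helper name is hypothetical.

import warnings


def _check_deprecation_shim():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = FlavaFeatureExtractor()  # constructs a plain FlavaImageProcessor under the hood
    assert isinstance(extractor, FlavaImageProcessor)
    assert any(issubclass(w.category, FutureWarning) for w in caught)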
| 364 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
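
A minimal sketch, not part of the original script, contrasting the two padding strategies used in `collate_fn` above: dynamic padding produces variable batch shapes, while fixed-length padding keeps one shape so XLA on TPU compiles the graph once.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
features = [tokenizer("short"), tokenizer("a slightly longer sentence")]
dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")
static = tokenizer.pad(features, padding="max_length", max_length=128, return_tensors="pt")
print(dynamic["input_ids"].shape)  # e.g. torch.Size([2, 7]) - varies batch to batch
print(static["input_ids"].shape)   # torch.Size([2, 128]) - fixed every batch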
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
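
A toy illustration, not in the original, of why the last gathered batch is trimmed: with padded last batches in a multi-process run, the gather returns more predictions than there are real samples, and the slice keeps only the real ones.

dataset_len = 10
world_size, batch_size = 2, 4
samples_seen = 8                         # accumulated over the first (full) gathered batch
gathered_last = world_size * batch_size  # 8 predictions come back, but only...
kept = dataset_len - samples_seen        # ...2 of them correspond to real samples
assert kept == 2 and gathered_last > kept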
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 364 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
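
A hypothetical usage sketch, not part of the module; the input file name is an assumption, and instantiating the tool downloads the CLIPSeg checkpoint.

from PIL import Image

tool = ImageSegmentationTool()
image = Image.open("cat.png")          # hypothetical input file
mask = tool(image=image, label="cat")  # PipelineTool.__call__ chains encode/forward/decode
mask.save("cat_mask.png")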
| 720 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
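
A hypothetical usage sketch, not from this file; the checkpoint name and image path are assumptions.

from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

image_processor = AutoImageProcessor.from_pretrained("microsoft/git-base")
tokenizer = AutoTokenizer.from_pretrained("microsoft/git-base")
processor = GitProcessor(image_processor, tokenizer)

inputs = processor(text="a photo of a cat", images=Image.open("cat.png"), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']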
| 199 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
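
To make the last test concrete, here is a minimal sketch, not from the repository, of how a local binary pattern value is formed: the eight neighbours of a pixel are thresholded against the centre and read off as a binary number. The neighbour ordering below is one common clockwise convention and may differ from the repository's exact ordering.

import numpy as np


def lbp_value_sketch(patch: np.ndarray) -> int:
    # patch is a 3x3 window around the pixel of interest
    center = patch[1, 1]
    neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                 patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
    return sum((p >= center) << i for i, p in enumerate(neighbors))


print(lbp_value_sketch(np.array([[9, 1, 9], [1, 5, 9], [1, 1, 9]])))  # 29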
| 478 |
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
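
A couple of sanity checks for the function above: the shorter operand is zero-filled before the character-wise comparison, so both results keep the full width of the longer operand.

assert binary_xor(25, 32) == "0b111001"  # 011001 ^ 100000
assert binary_xor(37, 50) == "0b010111"  # 100101 ^ 110010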
if __name__ == "__main__":
import doctest
doctest.testmod()
| 478 | 1 |
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
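
A toy demonstration, not in the script, of the key-renaming rule applied to the state dict above: only the first dot-separated segment of a parameter key is swapped.

key_parameters_to_change = {"downsample_blocks": "down_blocks"}
param_key = "downsample_blocks.0.resnets.0.conv1.weight"
prefix, rest = param_key.split(".")[0], param_key.split(".")[1:]
renamed = ".".join([key_parameters_to_change[prefix]] + rest)
assert renamed == "down_blocks.0.resnets.0.conv1.weight"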
| 102 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator=None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
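
A hypothetical usage sketch of the tiling path above; the checkpoint name is an assumption, and any AutoencoderKL checkpoint works the same way. Once tiling is enabled, inputs larger than `tile_sample_min_size` are encoded and decoded tile by tile, with the overlapping borders linearly blended to hide seams.

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()  # tiled_encode/tiled_decode kick in above tile_sample_min_size
with torch.no_grad():
    latents = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist.sample()
    image = vae.decode(latents).sample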
| 102 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
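
A minimal sketch, not from the module, of what the decorator does: when an object was dispatched with accelerate, its `_hf_hook.pre_forward` runs first (typically moving weights to the right device) before the wrapped method executes. The classes below are stand-ins for illustration only.

class FakeHook:
    def pre_forward(self, module):
        print("pre_forward called")


class Model:
    _hf_hook = FakeHook()

    @apply_forward_hook
    def encode(self, x):
        return x


Model().encode(1)  # prints "pre_forward called" if accelerate >= 0.17.0 is installed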
| 33 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 452 | 0 |
from __future__ import annotations

from collections.abc import Callable
class Heap:
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
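
A small usage sketch of the Heap above: a negating key turns the min-ordered structure into a max-heap by value.

h = Heap(key=lambda x: -x)
for item, value in [(1, 10), (2, 30), (3, 20)]:
    h.insert_item(item, value)
print(h.get_top())      # [2, -30] -> item 2 carries the largest value
h.update_item(3, 50)
print(h.extract_top())  # [3, -50]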
| 721 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
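
A quick check, illustrative and not part of the solution, that Euclid's formula with coprime m > n of opposite parity yields a primitive right triangle whose perimeter is 2*m*(m+n), which is exactly the step size used when counting multiples above.

m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n  # (3, 4, 5)
assert a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)                # perimeter 12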
| 174 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,next_sentence_label=snake_case__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,start_positions=snake_case__ ,end_positions=snake_case__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = self.num_choices
SCREAMING_SNAKE_CASE_ : int = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[str] = super()._prepare_for_class(snake_case__ ,snake_case__ ,return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=snake_case__ )
return inputs_dict
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = MobileBertModelTester(self )
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
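
A toy version, not from the test, of the ratio-based comparison used above: with values near 1e8 an absolute tolerance is useless, but the ratio of expected to observed stays near 1 for small relative errors.

import torch

expected = torch.tensor([1.0e8, -5.0e-1])
observed = expected * (1 + 5e-4)  # 0.05% relative error on both entries
ratio = expected / observed
assert torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3)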
| 105 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 253 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[Any] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = TFViTModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE = model(lowerCamelCase_ , training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
SCREAMING_SNAKE_CASE = self.image_size // 2
SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ )
SCREAMING_SNAKE_CASE = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = TFViTForImageClassification(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = model(lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
SCREAMING_SNAKE_CASE = self.image_size // 2
SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = TFViTForImageClassification(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = TFViTModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4) | 713 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data) | 698 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences, one sentence per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 116 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True,
        padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None,
        return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False,
        return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False,
        verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
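
# Minimal usage sketch for the processor above (assumes hub access and, for the
# default apply_ocr=True path of the image processor, a local Tesseract install):
#
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(images=document_image, return_tensors="pt")
#   # encoding now holds input_ids, attention_mask, bbox and pixel_values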
| 326 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
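
    # Illustration of the "▁" (U+2581) markers asserted above: SentencePiece flags
    # word-initial pieces with this metasymbol, so a rough detokenizer is just
    # join + replace + strip (a sketch, not the tokenizer's real decode path):
    #
    #   "".join(["▁This", "▁is", "▁a", "▁t", "est"]).replace("▁", " ").strip()
    #   == "This is a test"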
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def __snake_case ( self :Optional[Any] ) ->str:
# fmt: off
lowercase : Any = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 707 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512,
        enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
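
# Instantiation sketch: the defaults above mirror the published checkpoint's
# hyperparameters, and any field can be overridden by keyword as with every
# PretrainedConfig subclass.
#
#   config = BertAbsConfig(dec_layers=8)
#   assert config.dec_layers == 8 and config.enc_layers == 6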
| 348 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
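
# Sanity check sketch: a uniform distribution over four outcomes has entropy
# ln(4) ≈ 1.3863, e.g. entropy(torch.tensor([0.25, 0.25, 0.25, 0.25])).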
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)

    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)

    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
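
# Note on the scores computed above: head_importance accumulates |d loss / d head_mask|
# over the eval set, a first-order (Taylor) estimate of how much the LM loss would
# change if a head were removed, following Michel et al. (2019),
# "Are Sixteen Heads Really Better than One?" (http://arxiv.org/abs/1905.10650).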
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default=None, type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_subset", type=int, default=-1,
                        help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir", action="store_true",
                        help="Whether to overwrite data in output directory")
    parser.add_argument("--overwrite_cache", action="store_true",
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--dont_normalize_importance_by_layer", action="store_true",
                        help="Don't normalize importance score by layers")
    parser.add_argument("--dont_normalize_global_importance", action="store_true",
                        help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument("--try_masking", action="store_true",
                        help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument("--masking_threshold", default=0.9, type=float,
                        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument("--masking_amount", default=0.1, type=float,
                        help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, sequences shorter padded.")
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 414 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
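
# Effect of the _LazyModule registration above: importing the package only builds
# the name table; heavy submodules load on first attribute access, e.g.
#
#   from transformers.models.vivit import VivitConfig   # cheap, config module only
#   from transformers.models.vivit import VivitModel    # triggers the torch import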
| 333 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCAmelCase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__",
        pad_token="__null__", **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
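
# Usage sketch: BertConfig() reproduces the bert-base architecture, and the ONNX
# axes above mark batch/sequence (and choice) dimensions as dynamic, e.g.
#   BertOnnxConfig(BertConfig(), task="multiple-choice").inputs["input_ids"]
#   == {0: "batch", 1: "choice", 2: "sequence"}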
| 675 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
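
# Self-contained sketch of the chaining idea above: each bucket holds a deque and
# new values are pushed on the left (illustrative only; the real class delegates
# sizing and probing to the HashTable parent):
#
#   buckets = {key: deque() for key in range(5)}
#   for value in (10, 20, 30):
#       buckets[value % 5].appendleft(value)
#   list(buckets[0]) == [30, 20, 10]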
| 118 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every possible byte to a printable unicode character, avoiding the
    whitespace/control characters that the BPE code chokes on."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
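
# Byte-level BPE keeps vocab/merge files human-readable by remapping unprintable
# bytes; e.g. the space byte (0x20) lands outside the printable ranges above and
# renders as "Ġ". Quick check of the table:
#   bytes_to_unicode()[ord(" ")] == "Ġ"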
def get_pairs(word):
    """Return set of symbol pairs in a word (word is a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs):
        """Optionally prepend a space so the first word is BPE-encoded like any other word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """A Blenderbot sequence simply ends with the EOS token."""
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
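
# A minimal, self-contained sketch of the greedy merge loop used by bpe() above.
# The rank table and token are invented for illustration; real ranks come from the
# merges file loaded into self.bpe_ranks.
def _toy_bpe(token, ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # Merge the lowest-ranked adjacent pair first, exactly as bpe() does.
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)


assert _toy_bpe("hello", {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}) == "hell o"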
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
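
# Standalone sketch (invented inputs) of the padding-aware position ids that
# test_create_position_ids_respects_padding_index exercises: non-pad tokens count up
# from padding_idx + 1 while pad positions keep padding_idx. Illustration only, not
# the library implementation.
if is_torch_available():

    def _toy_position_ids(input_ids, padding_idx):
        mask = (torch.tensor(input_ids) != padding_idx).int()
        return torch.cumsum(mask, dim=1) * mask + padding_idx

    assert _toy_position_ids([[12, 31, 13, 1]], padding_idx=1).tolist() == [[2, 3, 4, 1]]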
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0 # The first color of the flag.
SCREAMING_SNAKE_CASE_ = 1 # The second color of the flag.
SCREAMING_SNAKE_CASE_ = 2 # The third color of the flag.
SCREAMING_SNAKE_CASE_ = (red, white, blue)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> list:
if not sequence:
return []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return list(SCREAMING_SNAKE_CASE__ )
a_ : Dict = 0
a_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
a_ : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
a_ , a_ : int = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
a_ , a_ : List[Any] = sequence[high], sequence[mid]
high -= 1
else:
a_ : Dict = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_ = input("""Enter numbers separated by commas:\n""").strip()
SCREAMING_SNAKE_CASE_ = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""") | 370 | 1 |
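    # Worked example (illustrative values): the single pass groups all 0s, then 1s, then 2s.
    assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]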
"""Find a root of 10 - x**2 on an interval via the bisection method."""


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
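    # Sanity check (illustrative): the positive root of 10 - x*x is sqrt(10) ≈ 3.1623,
    # and bisection stops once the bracketing interval is narrower than 0.01.
    assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01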
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import isqrt
def __a ( A ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(A ) + 1 ) )
def __a ( A = 10**6 ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = 1
lowercase__ = 7
while prime_candidate < max_prime:
primes_count += is_prime(A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
"""Tests for the IPNDM scheduler."""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
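    # Quick checks (illustrative): 76**2 == 5776 ends in 76, while 7**2 == 49 does not end in 7.
    assert is_automorphic_number(76) is True
    assert is_automorphic_number(7) is False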
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = data
A__ = previous
A__ = next_node
def __str__( self ):
'''simple docstring'''
return f"""{self.data}"""
def lowercase_ ( self ):
'''simple docstring'''
return self.data
def lowercase_ ( self ):
'''simple docstring'''
return self.next
def lowercase_ ( self ):
'''simple docstring'''
return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
def main() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
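    # Small usage sketch (illustrative): build 1 <-> 2 <-> 3, then remove the middle node.
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"
    assert 2 not in linked_list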
"""simple docstring"""
import cmath
import math
def lowerCAmelCase (__UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float ):
"""simple docstring"""
__UpperCamelCase =math.radians(__UpperCamelCase )
__UpperCamelCase =math.radians(__UpperCamelCase )
# Convert voltage and current to rectangular form
__UpperCamelCase =cmath.rect(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =cmath.rect(__UpperCamelCase , __UpperCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
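    # Illustrative check: with both phase angles at 0 degrees the result is purely real,
    # e.g. 100 V * 5 A -> 500 VA.
    assert apparent_power(100, 5, 0, 0) == complex(500, 0)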
"""Lorentz transformation of a four-vector for a boost along the x-axis."""
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio of velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix for a velocity along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
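    # Quick numeric check (illustrative): at v = c/2, beta = 0.5 and gamma = 1/sqrt(0.75).
    assert abs(gamma(c / 2) - 1 / sqrt(0.75)) < 1e-12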
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Perform the char-level replacements in SP_CHAR_MAPPING, leaving other chars untouched."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, splitting pieces further on Chinese chars, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
'''Tokenization class for Blenderbot.'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by the byte-level BPE."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
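
# Inline illustration (a plain str is a valid symbol sequence here):
assert get_pairs('hello') == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}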
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
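
# Illustrative-only sketch of the lazy-import pattern used above; `ToyLazyModule` is an
# invented stand-in for transformers' real _LazyModule, shown to make the mechanism
# concrete: attribute access triggers the owning submodule's import on first use.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported class name to the submodule that defines it.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        # Import the owning submodule lazily, then pull the requested attribute from it.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)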
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                  ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase ( self ):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase ( self ):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
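

if __name__ == "__main__":
    # Illustrative sketch, not part of the test suite: the end-to-end flow the
    # integration tests above exercise. Requires network access to download the
    # checkpoint; the name and language codes come from the class attributes.
    tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
    # Source ids end with [eos(=2), en_XX], matching the batch-parity assertions above.
    print(batch.input_ids[0][-2:].tolist())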
| 441 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
@slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
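

if __name__ == "__main__":
    # Minimal usage sketch of what the slow integration tests above verify
    # (assumes flax is installed and the checkpoint can be downloaded).
    model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
    input_ids = np.array([[0, 31414, 232, 2]], dtype=jnp.int32)
    hidden_states = model(input_ids)[0]
    print(hidden_states.shape)  # (1, 4, hidden_size)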
| 202 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run"
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
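
# Example invocation (illustrative only; the script filename, data path and task are
# placeholders, and `--model_name_or_path`/`--data_dir`/`--do_predict` come from
# `add_generic_args`/`BaseTransformer` rather than this file):
#
#   python run_pl_glue.py --task mrpc --data_dir /path/to/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 \
#       --output_dir ./results/mrpc --gpus 1 --do_predict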
| 223 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
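
# The same flow can be driven programmatically (illustrative; the flag name is taken
# from the parser defined above, the YAML path is a placeholder):
#
#   parser = config_command_parser()
#   args = parser.parse_args(["--config_file", "/tmp/accelerate_config.yaml"])
#   config_command(args)
#
# which is equivalent to running `accelerate config --config_file /tmp/accelerate_config.yaml`.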
| 377 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
                tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn",
                    )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # first token is the forced target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
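

if __name__ == "__main__":
    # Illustrative sketch of the legacy_behaviour switch tested above (requires
    # downloading the checkpoint): by default the source language code is
    # prepended, in legacy mode it is appended after EOS.
    tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
    ids = tok("UN Chief says there is no military solution in Syria").input_ids
    print(ids[0], ids[-1])  # 256047 (eng_Latn) ... 2 (eos)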
| 377 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
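

if __name__ == "__main__":
    # Illustrative: a WTQ-style fine-tuning setup. The label names are an
    # assumption for demonstration, not taken from this file; note how string
    # keys are coerced to int by the constructor above.
    config = TapasConfig(
        num_aggregation_labels=4,
        use_answer_as_supervision=True,
        aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
    )
    print(config.aggregation_labels)  # {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}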
| 70 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any model with a sequence-classification head."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__(self , **lowerCAmelCase_ ):
super().__init__(**lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
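

if __name__ == "__main__":
    # Quick numerical sanity check of the helpers above: softmax rows sum to 1
    # and the max-subtraction keeps the exponentials stable.
    logits = np.array([[2.0, 1.0, 0.1]])
    print(softmax(logits))  # ~[[0.659, 0.242, 0.099]]
    print(sigmoid(np.array([0.0])))  # [0.5]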
| 180 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
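

if __name__ == "__main__":
    # Illustrative (requires torch): "gelu_10" behaves like GELU but saturates
    # at 10.0, which is exactly what the gelu_10 test above asserts.
    act = get_activation("gelu_10")
    x = torch.tensor([-1.0, 0.0, 5.0, 100.0])
    print(act(x))  # last entry is capped at 10.0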
| 714 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
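

if __name__ == "__main__":
    # Illustrative: the xpath-specific fields are what distinguish this config
    # from a plain BERT-style one.
    config = MarkupLMConfig()
    print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32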
| 482 | 0 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
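    # Sanity check: solution(10) == 55**2 - 385 == 2640, and the default n=100
    # gives 25164150 (Project Euler problem 6).
    assert solution(10) == 2640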
| 100 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
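

if __name__ == "__main__":
    # Illustrative: pad() rounds H and W up to the next multiple of `size`, so a
    # 3x21x30 image with pad_size=8 becomes 3x24x32 after preprocessing.
    processor = Swin2SRImageProcessor(do_rescale=False, pad_size=8)
    image = np.zeros((3, 21, 30), dtype=np.float32)
    out = processor.preprocess(image, return_tensors="np")
    print(out["pixel_values"].shape)  # (1, 3, 24, 32)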
| 85 | 0 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load the collected amax into all *_quantizer modules."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections
    run as a single fused GEMM: force Q, K and V quantizers to share one amax."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
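# Toy illustration (hypothetical numbers): a fused QKV GEMM needs one shared input scale,
# so fusing simply propagates the largest of the three amaxes to all of them.
def _example_fused_amax(q_amax=2.1, k_amax=3.4, v_amax=2.8):
    amax = max(q_amax, k_amax, v_amax)  # 3.4 becomes the shared dynamic range
    return amax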
def clip_gelu(model, maxval):
    """Clip the amax of *.output.dense input quantizers (the GELU outputs) to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-tensor amax to per-channel, assigning the same value to each channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights at the current granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
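# Sketch of the reduction above in plain torch, assuming a 2D Linear weight quantized
# along axis 0: every axis except 0 is reduced, yielding one amax per output row.
def _example_weight_amax(weight):
    reduce_axis = tuple(set(range(weight.dim())) - {0})
    return weight.abs().amax(dim=reduce_axis, keepdim=True)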
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one row per module, showing its input and weight quantizer settings."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod.{quantizer}, e.g. _input_quantizer._disabled = True."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def A__ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]="both" , **_UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase__ , lowerCAmelCase__ , "_input_quantizer" , lowerCAmelCase__ , lowerCAmelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase__ , lowerCAmelCase__ , "_weight_quantizer" , lowerCAmelCase__ , lowerCAmelCase__ )
logger.info(lowerCAmelCase__ )
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
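# Usage sketch: entries in `names` are regexes matched against module names, so quantizer
# attributes can be toggled per layer. Both calls below are illustrative only.
# set_quantizer_by_name(model, ["embeddings"], _disabled=True)
# set_quantizer_by_name(model, [r"layer.\d+.output.dense"], _disabled=False)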
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block bundling a conv layer, batch norm and ReLU activation."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
            padding=padding, bias=bias, dilation=dilation)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) as used in PSPNet."""

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
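# Shape sketch (illustrative): each block pools the input to (pool_scale, pool_scale),
# projects it to `channels`, and upsamples back to the input resolution.
def _example_psp_shapes():
    module = UperNetPyramidPoolingModule((1, 2, 3, 6), in_channels=64, channels=32, align_corners=False)
    outs = module(torch.randn(1, 64, 32, 32))
    return [o.shape for o in outs]  # four tensors of shape (1, 32, 32, 32)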
class UperNetHead(nn.Module):
    """Decode head based on UperNet: a pyramid pooling module plus an FPN over the backbone features."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners)
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1)
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners)
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully Convolutional Network auxiliary head, used on an intermediate backbone feature map."""

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True  # third class attribute reconstructed by assumption

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=snake_case__ , config_class=_CONFIG_FOR_DOC )
def lowercase__ ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
lowerCAmelCase : List[Any] = self.backbone.forward_with_filtered_kwargs(
snake_case__ , output_hidden_states=snake_case__ , output_attentions=snake_case__ )
lowerCAmelCase : List[str] = outputs.feature_maps
lowerCAmelCase : Any = self.decode_head(snake_case__ )
lowerCAmelCase : Optional[Any] = nn.functional.interpolate(snake_case__ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=snake_case__ )
lowerCAmelCase : str = None
if self.auxiliary_head is not None:
lowerCAmelCase : List[Any] = self.auxiliary_head(snake_case__ )
lowerCAmelCase : Optional[Any] = nn.functional.interpolate(
snake_case__ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=snake_case__ )
lowerCAmelCase : Dict = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
lowerCAmelCase : Dict = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCAmelCase : str = loss_fct(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = loss_fct(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCAmelCase : str = (logits,) + outputs[1:]
else:
lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
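# Inference sketch, assuming the checkpoint listed at the top of the file is available:
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# inputs = processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits  # (batch_size, num_labels, height, width)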
| 645 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask):
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9_967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9_909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9_879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9_834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9_716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9_612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9_552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9_532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9_483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9_464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9_408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9_335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9_326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9_262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8_999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8_986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8_984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8_873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_053},
] , )
| 428 | 0 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length n for which the fill-count function first exceeds
    one million (Project Euler 115).
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
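def _example_small_case():
    """Sanity sketch: the problem statement gives fill-count(3, 30) = 1089155, the first
    value over one million for m = 3, so solution(3) is expected to return 30."""
    return solution(3)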
if __name__ == "__main__":
    print(f'''{solution() = }''')
 | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored")
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]")
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        """Encodes text directly with the SentencePiece model, skipping special-token logic."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs in the first arrangement whose total number
    of discs exceeds min_total (Project Euler 100).
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
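def _example_known_arrangement():
    """Sanity sketch: per the problem statement, the first arrangement with more than 20
    discs in total is 15 blue out of 21, so solution(20) is expected to return 15."""
    return solution(20)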
if __name__ == "__main__":
print(F"""{solution() = }""")
| 225 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''')
    return get_obj_from_str(config['target'])(**config.get('params', {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(F'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
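# Usage sketch (paths and tensors are hypothetical): load a VQGAN checkpoint and
# round-trip an image batch through encode/decode.
# model = load_vqgan("cuda", conf_path="./model_checkpoints/vqgan_only.yaml")
# xrec = reconstruct_with_vqgan(x, model)  # x: (batch, 3, H, W), values in [-1, 1]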
| 231 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_UpperCamelCase : List[str] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_UpperCamelCase : List[str] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_UpperCamelCase : Optional[Any] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
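# A minimal sketch of the clipped n-gram counting that underlies the precisions above:
# each candidate n-gram count is clipped by its maximum count in the reference.
def _example_clipped_unigram_precision(candidate, reference):
    from collections import Counter

    cand_counts, ref_counts = Counter(candidate), Counter(reference)
    clipped = sum(min(count, ref_counts[token]) for token, count in cand_counts.items())
    return clipped / max(len(candidate), 1)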
| 134 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
| 134 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
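# Invocation sketch (paths are hypothetical):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./rembert/model.ckpt \
#   --rembert_config_file ./rembert/config.json \
#   --pytorch_dump_path ./rembert/pytorch_model.bin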
| 53 | 1 |
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
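# Quick numeric sketch of the piecewise behaviour (values rounded): positive inputs pass
# through unchanged, negative inputs decay smoothly towards -alpha.
# exponential_linear_unit(np.array([2.3, 0.6, -2.0]), alpha=0.3)
# -> array([ 2.3, 0.6, -0.2594])  since 0.3 * (exp(-2.0) - 1) ~= -0.2594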
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 575 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        temp = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
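# The scheduling priority above is the classic HRRN response ratio, isolated here for
# clarity: a pure function of how long a process has waited relative to its burst time.
def _example_response_ratio(wait_time: float, burst_time: float) -> float:
    return (wait_time + burst_time) / burst_time  # equals 1.0 for a freshly arrived process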
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
            f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
        )
    print(f'''average waiting time : {mean(waiting_time):.5f}''')
    print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 575 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_8_4,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_2_8,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=2_0,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=3_0,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    # TRT BERT bindings here are assumed to take int32 input tensors
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
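# NOTE: the H2D copies, execute_async, and D2H copies above are all enqueued on a single CUDA
# stream, so stream.synchronize() guarantees the timed interval covers transfers plus inference.
# The three-input / two-output binding layout assumed here must match the ONNX export.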
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
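# Illustration (hypothetical numbers): with max_seq_length=384 and doc_stride=128, a context of
# roughly 900 tokens is split into several overlapping features; every feature carries the same
# "example_id" so post-processing can map per-feature logits back to the original example.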
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
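    # Example (hypothetical shapes): an int32 input bound as (8, 384) occupies
    # trt.volume((8, 384)) * 4 = 8 * 384 * 4 = 12288 bytes.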
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 227 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
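# Worked example (values traced by hand): for "01-01-2000", m becomes 13 and y 1999, so
# c = 19, k = 99, t = 28, u = 4, v = 24, x = 100, z = 156, w = 118 and f = 118 % 7 = 6,
# i.e. Saturday -- which matches datetime for 2000-01-01.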
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase__ = parser.parse_args()
zeller(args.date_input)
| 227 | 1 |
from __future__ import annotations
def average(nums: list) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums) / len(nums)
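# Example: average([1, 2, 3, 4]) returns 2.5; an empty list raises ValueError.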
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    '''simple docstring'''
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers' )

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers' )

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0' )

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366' )

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
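# Illustration: mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 7-day pass bought on day 1 covers days 1-7, then two 1-day passes cover days 8 and 20).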
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __A ( self : int ) -> Tuple:
# with apply_OCR = True
__lowerCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowerCamelCase = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
__lowerCamelCase = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__lowerCamelCase = image_processing(__a , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowerCamelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__lowerCamelCase = LayoutLMvaImageProcessor(apply_ocr=__a )
__lowerCamelCase = image_processing(__a , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 298 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 116 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
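# The keys of _import_structure mirror submodule names; _LazyModule (installed at the bottom of
# this file) uses them to defer the actual imports until an attribute is first accessed.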
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 444 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 444 | 1 |
import numpy as np
from PIL import Image
def maxpooling(arr: list[list[int]], size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
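# Example: maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# returns [[6., 8.], [14., 16.]] -- each non-overlapping 2x2 window is reduced to its maximum.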
def avgpooling(arr: list[list[int]], size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
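# Example: avgpooling on the same 4x4 matrix with size=2, stride=2 returns [[3., 5.], [11., 13.]],
# because each window average (3.5, 5.5, 11.5, 13.5) is truncated by int().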
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 364 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase : int =logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case__ ):
def __init__( self : Tuple , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ):
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 364 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
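# Minimal usage sketch (hypothetical values): LxmertConfig(visual_feat_dim=2048, visual_pos_dim=4)
# -- the visual feature/position tensors fed to the model must match these dimensionalities.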
| 720 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''Encrypt text into a (cipher, key) pair of integer lists.'''
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''Recover the plaintext from a (cipher, key) pair.'''
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 443 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 49 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """simple docstring"""
    # get the height and width of the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
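# Example: a dark pixel [10, 20, 30] maps to [245, 235, 225]; pure black becomes pure white.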
if __name__ == "__main__":
# read original image
A__ : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
A__ : Dict = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 353 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
a_ = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = SavedModel()
lowerCAmelCase__ = []
with open(os.path.join(UpperCamelCase_ , "utils" , "tf_ops" , "onnx.json" ) ) as f:
lowerCAmelCase__ = json.load(UpperCamelCase_ )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(UpperCamelCase_ )] )
with open(UpperCamelCase_ , "rb" ) as f:
saved_model.ParseFromString(f.read() )
lowerCAmelCase__ = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowerCAmelCase__ = sorted(UpperCamelCase_ )
lowerCAmelCase__ = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(UpperCamelCase_ )
if strict and len(UpperCamelCase_ ) > 0:
raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(UpperCamelCase_ ) > 0:
print(F"Found the following incompatible ops for the opset {opset}:" )
print(*UpperCamelCase_ , sep="\n" )
else:
print(F"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
a_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 115 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
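# Each beta_i equals 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so the running
# product of (1 - beta_i) reproduces the chosen alpha_bar schedule at the discrete timesteps.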
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        """Scale the denoising model input by 1 / sqrt(sigma^2 + 1) for the current step."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
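

# Usage sketch (illustrative shapes, not a full pipeline): the timesteps are
# interleaved with log-linear midpoints, so each loop iteration alternates
# between the scheduler's 1st-order and 2nd-order update; the zero tensor
# below stands in for a real UNet noise prediction.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
        sample = scheduler.step(noise_pred, t, sample).prev_sample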
| 115 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given its coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
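
    # Cross-check (a sketch): both evaluators agree on random polynomials,
    # while horner() needs only one multiply-add per coefficient.
    import random

    for _ in range(100):
        coeffs = tuple(random.uniform(-10, 10) for _ in range(6))
        point = random.uniform(-5, 5)
        a, b = evaluate_poly(coeffs, point), horner(coeffs, point)
        assert abs(a - b) <= 1e-9 * max(1.0, abs(a))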
| 231 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowercase__ = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
lowercase__ = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
lowercase__ = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 638 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor positions in the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Run two simultaneous A* searches, one forward and one backward."""

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims for the frontier node of the other one
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__A : Optional[int] = (0, 0)
__A : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__A : Optional[Any] = time.time()
__A : str = AStar(init, goal)
__A : Union[str, Any] = a_star.search()
__A : List[Any] = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
__A : int = time.time()
__A : Dict = BidirectionalAStar(init, goal)
__A : str = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds') | 267 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as the start/end of an entity, given the support set."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends | 267 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 list of lists with the requested shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 259 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """True if no already-colored neighbour of the vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring using at most `max_colors` colors, or []."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
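

# Usage sketch: 3-coloring a 4-cycle given as an adjacency matrix
# (the graph below is made up for illustration).
if __name__ == "__main__":
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 3))  # a valid assignment, e.g. [0, 1, 0, 1]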
| 375 | 0 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
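
    # Worked example (a sketch): 28 is perfect because its proper divisors
    # 1 + 2 + 4 + 7 + 14 sum back to 28, while 12 is abundant (sum 16).
    assert sum_of_divisors(28) == 28
    assert sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6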
| 704 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
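

# Usage sketch (illustrative, not part of the library module; this file's
# relative imports mean it is not meant to be run directly):
if __name__ == "__main__":
    onnx_config = RobertaOnnxConfig(RobertaConfig())
    print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])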
| 320 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
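
    # Example invocation (paths and model name are illustrative):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-224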
| 37 |
'''simple docstring'''
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Two-pointer check: walk inwards from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal") | 448 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = v/c, validating 1 <= v <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to an event four-vector (symbolic if none is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
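
    # Numerical check (a sketch): a boost preserves the Minkowski interval
    # (ct)^2 - x^2 - y^2 - z^2 up to floating-point error.
    boosted = transform(150000000, event=np.array([2.0, 1.0, 1.0, 1.0]))
    rest = np.array([2.0 * c, 1.0, 1.0, 1.0])  # transform() scales t by c in place
    interval = lambda v: v[0] ** 2 - v[1] ** 2 - v[2] ** 2 - v[3] ** 2
    assert abs(interval(rest) - interval(boosted)) / abs(interval(rest)) < 1e-9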
| 711 | import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extends the base TrainingArguments with seq2seq-specific knobs
    (label smoothing, sortish sampling, generation-based metrics, and
    dropout overrides that go into model.config)."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 661 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1) | 585 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training dataset. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier."
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard."
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging)."
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8."
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket."
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args) | 585 | 1 |
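# Minimal round-trip sketch for the shard writer above: serialize one fake
# tokenized batch with get_serialized_examples, then parse it back to confirm
# the int64 feature encoding is lossless (relies on the script's TensorFlow
# import; the token ids are illustrative).
fake_batch = {"input_ids": [[0, 31414, 232, 2]], "attention_mask": [[1, 1, 1, 1]]}
record_bytes = get_serialized_examples(fake_batch)[0]
feature_spec = {
    "input_ids": tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
    "attention_mask": tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
}
parsed = tf.io.parse_single_example(record_bytes, feature_spec)
assert parsed["input_ids"].numpy().tolist() == [0, 31414, 232, 2]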
import copy
import re
class TrialShortNamer :
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults ( cls : Optional[int] , prefix : Optional[int] , defaults : List[str] ) -> List[str]:
        """simple docstring"""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word ( info : Optional[Any] , word : List[str] ) -> Optional[int]:
        """simple docstring"""
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer : Optional[Any] ):
                s = ''''''
                while integer != 0:
                    s = chr(ord('''A''' ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + '''#''' + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key ( info : Optional[Any] , param_name : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        words = param_name.split('''_''' )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['''''', '''_''']
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name ( info : Optional[Any] , param_name : Optional[int] ) -> int:
        """simple docstring"""
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info ( cls : Dict ) -> Union[str, Any]:
        """simple docstring"""
        if cls.NAMING_INFO is not None:
            return
        info = {
            '''short_word''': {},
            '''reverse_short_word''': {},
            '''short_param''': {},
            '''reverse_short_param''': {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname ( cls : Dict , params : Optional[int] ) -> str:
        """simple docstring"""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO['''short_param'''][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = '''''' if isinstance(v , (int, float) ) else '''-'''
            name.append(f"{key}{sep}{v}" )
        return "_".join(name )
    @classmethod
    def parse_repr ( cls : Optional[Any] , repr : Dict ) -> Union[str, Any]:
        """simple docstring"""
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('''_''' )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split('''-''' )
            else:
                p_k = re.sub('''[0-9.]''' , '''''' , value )
                p_v = float(re.sub('''[^0-9.]''' , '''''' , value ) )
            key = cls.NAMING_INFO['''reverse_short_param'''][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
return parameters | 703 |
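# Quick usage sketch of TrialShortNamer (names and values are illustrative):
# unchanged defaults are dropped from the encoded run name, and parse_repr
# inverts the encoding.
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-4, "batch_size": 8}

run_name = RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 8})
print(run_name)                       # e.g. "run_lr0.0003"
print(RunNamer.parse_repr(run_name))  # {'learning_rate': 0.0003, 'batch_size': 8}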
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer ( self : str , **kwargs : int ) -> Optional[Any]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer ( self : List[str] , **kwargs : List[Any] ) -> str:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self : Dict , tokenizer : Any ) -> Dict:
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
    @cached_property
    def default_tokenizer_fast ( self : List[Any] ) -> Any:
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
    def test_prepare_batch ( self : str ) -> Dict:
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
    def test_prepare_batch_empty_target_text ( self : Dict ) -> Dict:
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , batch )
            self.assertIn('''attention_mask''' , batch )
            self.assertNotIn('''labels''' , batch )
            self.assertNotIn('''decoder_attention_mask''' , batch )
@require_torch
    def test_tokenizer_as_target_length ( self : Dict ) -> str:
        """simple docstring"""
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
    def test_special_tokens ( self : int ) -> Dict:
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='''pt''' )
            targets = tokenizer(text_target=tgt_text , return_tensors='''pt''' )
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask ( self : Optional[Any] ) -> str:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['''Summary of the text.''', '''Another summary.''']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output['''global_attention_mask'''] = [[0] * len(x ) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , expected_global_attention_mask )
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
pass
    def test_embeded_special_tokens ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) | 191 | 0 |
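# Standalone sketch: build the same toy byte-level BPE files that setUp()
# above writes, then check a tokenize round trip (requires `transformers`;
# the file names mirror VOCAB_FILES_NAMES).
import tempfile

toy_vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
             "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
             "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>"]
toy_merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "vocab.json"), "w", encoding="utf-8") as f:
    json.dump({tok: i for i, tok in enumerate(toy_vocab)}, f)
with open(os.path.join(tmp, "merges.txt"), "w", encoding="utf-8") as f:
    f.write("\n".join(toy_merges))

toy_tok = LEDTokenizer(os.path.join(tmp, "vocab.json"),
                       os.path.join(tmp, "merges.txt"), unk_token="<unk>")
print(toy_tok.tokenize("lower newer"))  # expected: ['l', 'o', 'w', 'er', 'Ġ', 'n', 'e', 'w', 'er']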
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : int = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class PixaStructTextConfig ( PretrainedConfig ):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , vocab_size=5_0_2_4_4 , hidden_size=7_6_8 , d_kv=6_4 , d_ff=2_0_4_8 , num_layers=1_2 , num_heads=1_2 , relative_attention_num_buckets=3_2 , relative_attention_max_distance=1_2_8 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig ( PretrainedConfig ):
    model_type = '''pix2struct_vision_model'''
    def __init__( self , hidden_size=7_6_8 , patch_embed_hidden_size=7_6_8 , d_ff=2_0_4_8 , d_kv=6_4 , num_hidden_layers=1_2 , num_attention_heads=1_2 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4_0_9_6 , relative_attention_num_buckets=3_2 , relative_attention_max_distance=1_2_8 , **kwargs , ) -> Any:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig ( PretrainedConfig ):
    model_type = '''pix2struct'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ) -> Optional[Any]:
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs ( cls , text_config , vision_config , **kwargs ) -> Dict:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict ( self ) -> List[Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
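# Quick composition sketch using the config classes above; the sizes are tiny
# illustrative values, not real Pix2Struct checkpoints.
text_cfg = PixaStructTextConfig(num_layers=2, num_heads=4, hidden_size=64, d_kv=16, d_ff=128)
vision_cfg = PixaStructVisionConfig(num_hidden_layers=2, num_attention_heads=4,
                                    hidden_size=64, d_kv=16, d_ff=128)
composite = PixaStructConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert composite.to_dict()["text_config"]["num_layers"] == 2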
| 559 | import math
import sys
def lowerCAmelCase( number ):
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 559 | 1 |
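# Quick checks of the DP above (Lagrange's four-square theorem bounds every
# answer by 4): 12 = 4 + 4 + 4 needs three squares, 13 = 4 + 9 needs two.
assert lowerCAmelCase(12) == 3
assert lowerCAmelCase(13) == 2
assert all(lowerCAmelCase(n) <= 4 for n in range(1, 50))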
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState :
    # setable values
    num_inference_steps : Optional[int] = None
    timesteps : Optional[jnp.ndarray] = None
    schedule : Optional[jnp.ndarray] = None # sigma(t_i)
    @classmethod
    def create ( cls : Dict ):
        """simple docstring"""
        return cls()
@dataclass
class FlaxKarrasVeOutput ( BaseOutput ):
    prev_sample : jnp.ndarray
    derivative : jnp.ndarray
    state : KarrasVeSchedulerState
class FlaxKarrasVeScheduler ( FlaxSchedulerMixin , ConfigMixin ):
    @property
    def has_state ( self : Union[str, Any] ):
        """simple docstring"""
        return True
    @register_to_config
    def __init__( self : Dict , sigma_min : float = 0.02 , sigma_max : float = 1_0_0 , s_noise : float = 1.0_07 , s_churn : float = 8_0 , s_min : float = 0.05 , s_max : float = 5_0 , ):
        """simple docstring"""
        pass
    def create_state ( self : str ):
        """simple docstring"""
        return KarrasVeSchedulerState.create()
    def set_timesteps ( self : Union[str, Any] , state : KarrasVeSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        """simple docstring"""
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
    def add_noise_to_input ( self : List[Any] , state : KarrasVeSchedulerState , sample : jnp.ndarray , sigma : float , key : random.KeyArray , ):
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step ( self : str , state : KarrasVeSchedulerState , model_output : jnp.ndarray , sigma_hat : float , sigma_prev : float , sample_hat : jnp.ndarray , return_dict : bool = True , ):
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def step_correct ( self : Optional[int] , state : KarrasVeSchedulerState , model_output : jnp.ndarray , sigma_hat : float , sigma_prev : float , sample_hat : jnp.ndarray , sample_prev : jnp.ndarray , derivative : jnp.ndarray , return_dict : bool = True , ):
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def add_noise ( self : Any , state : KarrasVeSchedulerState , original_samples : List[Any] , noise : Dict , timesteps : List[Any] ):
        """simple docstring"""
        raise NotImplementedError()
| 717 |
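# Hedged smoke test of one sampling step with the scheduler defined above;
# jnp.zeros_like stands in for a real UNet call, and the shapes are arbitrary.
import jax

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=10, shape=(1, 8))

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 8)) * state.schedule[0]
sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, float(state.schedule[0]), key)
out = scheduler.step(state, jnp.zeros_like(sample_hat), sigma_hat, float(state.schedule[1]), sample_hat)
print(out.prev_sample.shape)  # (1, 8)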
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
    predicted_image_embedding : torch.FloatTensor
class PriorTransformer ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self : List[str] , num_attention_heads : int = 3_2 , attention_head_dim : int = 6_4 , num_layers : int = 2_0 , embedding_dim : int = 7_6_8 , num_embeddings : Union[str, Any]=7_7 , additional_embeddings : Dict=4 , dropout : float = 0.0 , time_embed_act_fn : str = "silu" , norm_in_type : Optional[str] = None , embedding_proj_norm_type : Optional[str] = None , encoder_hid_proj_type : Optional[str] = "linear" , added_emb_type : Optional[str] = "prd" , time_embed_dim : Optional[int] = None , embedding_proj_dim : Optional[int] = None , clip_embed_dim : Optional[int] = None , ):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn='''gelu''' , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('''causal_attention_mask''' , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors ( self : Tuple ):
        """simple docstring"""
        processors = {}
        def fn_recursive_add_processors(name : str , module : torch.nn.Module , processors : Dict[str, AttentionProcessor] ):
            if hasattr(module , '''set_processor''' ):
                processors[f'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor ( self : Optional[int] , processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        """simple docstring"""
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(name : str , module : torch.nn.Module , processor : List[Any] ):
            if hasattr(module , '''set_processor''' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor ( self : Union[str, Any] ):
        """simple docstring"""
        self.set_attn_processor(AttnProcessor() )
    def forward ( self : str , hidden_states : Optional[int] , timestep : Union[torch.Tensor, float, int] , proj_embedding : torch.FloatTensor , encoder_hidden_states : Optional[torch.FloatTensor] = None , attention_mask : Optional[torch.BoolTensor] = None , return_dict : bool = True , ):
        """simple docstring"""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents ( self : Optional[Any] , prior_latents : Optional[int] ):
        """simple docstring"""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 598 | 0 |
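# Hedged smoke test of the prior defined above with tiny illustrative sizes
# (2 layers, 16-dim embeddings); the random tensors stand in for real CLIP
# embeddings.
prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2,
                         embedding_dim=16, num_embeddings=5, additional_embeddings=4)
out = prior(
    hidden_states=torch.randn(1, 16),            # noisy image embedding
    timestep=3,
    proj_embedding=torch.randn(1, 16),           # pooled text embedding
    encoder_hidden_states=torch.randn(1, 5, 16), # text token embeddings
)
print(out.predicted_image_embedding.shape)       # torch.Size([1, 16])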
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x : Union[str, Any] ): # picklable for multiprocessing
    return x.sum()
def add_one ( i : int ): # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x : int
    y : str
class PyUtilsTest( TestCase ):
    def test_map_nested ( self : List[str] ):
        '''simple docstring'''
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {'a': 1, 'b': 2}
        s6 = {'a': [1, 2], 'b': [3, 4]}
        s7 = {'a': {'1': 1}, 'b': 2}
        s8 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        e1 = {}
        e2 = []
        e3 = 2
        e4 = [2, 3]
        e5 = {'a': 2, 'b': 3}
        e6 = {'a': [2, 3], 'b': [4, 5]}
        e7 = {'a': {'1': 2}, 'b': 3}
        e8 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.assertEqual(map_nested(add_one , s1 ) , e1 )
        self.assertEqual(map_nested(add_one , s2 ) , e2 )
        self.assertEqual(map_nested(add_one , s3 ) , e3 )
        self.assertEqual(map_nested(add_one , s4 ) , e4 )
        self.assertEqual(map_nested(add_one , s5 ) , e5 )
        self.assertEqual(map_nested(add_one , s6 ) , e6 )
        self.assertEqual(map_nested(add_one , s7 ) , e7 )
        self.assertEqual(map_nested(add_one , s8 ) , e8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , s1 , num_proc=num_proc ) , e1 )
        self.assertEqual(map_nested(add_one , s2 , num_proc=num_proc ) , e2 )
        self.assertEqual(map_nested(add_one , s3 , num_proc=num_proc ) , e3 )
        self.assertEqual(map_nested(add_one , s4 , num_proc=num_proc ) , e4 )
        self.assertEqual(map_nested(add_one , s5 , num_proc=num_proc ) , e5 )
        self.assertEqual(map_nested(add_one , s6 , num_proc=num_proc ) , e6 )
        self.assertEqual(map_nested(add_one , s7 , num_proc=num_proc ) , e7 )
        self.assertEqual(map_nested(add_one , s8 , num_proc=num_proc ) , e8 )
        sn = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
        expected_map_nested_sn_sum = {'a': 2, 'b': 0, 'c': 2}
        expected_map_nested_sn_int = {
            'a': np.eye(2 ).astype(int ),
            'b': np.zeros(3 ).astype(int ),
            'c': np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , sn , num_proc=num_proc )
    def test_zip_dict ( self : str ):
        '''simple docstring'''
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 3, 'b': 4}
        d3 = {'a': 5, 'b': 6}
        expected_zip_dict_result = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment ( self : int ):
        '''simple docstring'''
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(foo , 'my_attr' , 'BAR' ):
            self.assertEqual(foo.my_attr , 'BAR' )
        self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
] , )
def test_map_nested_num_proc ( iterable_length : List[Any] , num_proc : Optional[int] , expected_num_proc : Optional[Any] ):
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 1_0 , data_struct , num_proc=num_proc , parallel_min_length=1_6 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest( TestCase ):
    @require_tf
    def test_tensorflow ( self : Optional[int] ):
        '''simple docstring'''
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(4_2 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_torch ( self : List[str] ):
        '''simple docstring'''
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(4_2 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy ( self : Union[str, Any] ):
        '''simple docstring'''
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(4_2 ):
            out1 = gen_random_output()
        with temp_seed(4_2 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data ( input_data : Optional[Any] ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten ( data : Optional[int] , expected_output : str ):
    output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict ( ):
    input = A(x=1 , y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input ) == expected_output
    input = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]}
    expected_output = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=1_0 , y='foo' )] )
def _split_text ( text : Any ):
    return text.split()
def _2seconds_generator_of_2items_with_timing ( content : int ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out ) == 2_0
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out ) == 2_0
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _2seconds_generator_of_2items_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out ) == 4
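# One-line sanity check of the map_nested utility exercised throughout above:
assert map_nested(add_one, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}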
| 47 | """simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger ( ):
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
    def __init__( self : str , lock_file : Optional[int] ):
        self.lock_file = lock_file
        return None
    def __str__( self : Any ):
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy :
    def __init__( self : Tuple , lock : Optional[int] ):
        self.lock = lock
        return None
    def __enter__( self : Union[str, Any] ):
        return self.lock
    def __exit__( self : List[Any] , exc_type : Dict , exc_value : str , traceback : List[Any] ):
        self.lock.release()
        return None
class BaseFileLock :
    def __init__( self : Dict , lock_file : Tuple , timeout : Dict=-1 , max_filename_length : Dict=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
return None
    @property
    def lock_file ( self : Tuple ):
        return self._lock_file
    @property
    def timeout ( self : Union[str, Any] ):
        return self._timeout
    @timeout.setter
    def timeout ( self : str , value : str ):
        self._timeout = float(value )
        return None
    def _acquire ( self : List[str] ):
        raise NotImplementedError()
    def _release ( self : str ):
        raise NotImplementedError()
    @property
    def is_locked ( self : str ):
        return self._lock_file_fd is not None
    def acquire ( self : List[Any] , timeout : Union[str, Any]=None , poll_intervall : Optional[int]=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release ( self : Optional[int] , force : Tuple=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None
    def __enter__( self : Optional[Any] ):
        self.acquire()
        return self
    def __exit__( self : Any , exc_type : Optional[Any] , exc_value : Optional[Any] , traceback : Optional[Any] ):
        self.release()
        return None
    def __del__( self : Optional[int] ):
        self.release(force=True )
        return None
    def hash_filename_if_too_long ( self : Optional[int] , path : str , max_length : int ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock ( BaseFileLock ):
    def __init__( self : List[Any] , lock_file : int , timeout : str=-1 , max_filename_length : Tuple=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
    def _acquire ( self : Tuple ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release ( self : Optional[int] ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock ( BaseFileLock ):
    def __init__( self : Union[str, Any] , lock_file : List[str] , timeout : str=-1 , max_filename_length : List[str]=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire ( self : Optional[Any] ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self : str ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock ( BaseFileLock ):
    def _acquire ( self : Tuple ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self : Optional[int] ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 644 | 0 |
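# Minimal usage sketch of the module above: FileLock resolves to the best
# backend for the platform (msvcrt, fcntl, or the soft fallback), and the
# context manager pairs acquire() with release().
lock = FileLock("demo.txt.lock", timeout=1)
with lock:
    # critical section: only one process can hold demo.txt.lock here
    assert lock.is_locked
assert not lock.is_locked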
def remove_duplicates ( key : str ) -> str:
    key_no_dups = ''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map ( key : str ) -> dict[str, str]:
    alphabet = [chr(i + 6_5 ) for i in range(2_6 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 2_6 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher ( message : str , cipher_map : dict[str, str] ) -> str:
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher ( message : str , cipher_map : dict[str, str] ) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main ( ) -> None:
    message = input('Enter message to encode or decode: ' ).strip()
    key = input('Enter keyword: ' ).strip()
    option = input('Encipher or decipher? E/D:' ).strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 703 |
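# Quick round-trip demo of the keyword cipher above ("CORONA" is an
# illustrative keyword): encoding then decoding recovers the plaintext.
demo_map = create_cipher_map("CORONA")
ciphertext = encipher("HELLO WORLD", demo_map)
assert decipher(ciphertext, demo_map) == "HELLO WORLD"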
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a ={
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337 | 0 |
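# Hedged sketch of what the lazy structure above buys: `import transformers`
# stays cheap, and the first attribute access triggers the real submodule
# import.
import importlib
xlm = importlib.import_module("transformers.models.xlm")
print(getattr(xlm, "XLMConfig").model_type)  # "xlm"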
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get ( k ):
    return getitem, k
def _set ( k , v ):
    return setitem, k, v
def _del ( k ):
    return delitem, k
def _run_operation ( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
a_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
a_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
a_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
a_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def test_hash_map_is_the_same_as_dict ( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_matching_query ( ):
    def is_public(name ) -> bool:
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names | 25 |
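# A small usage sketch of the differential harness above: replay one scripted
# operation list against both HashMap and a plain dict and require agreement.
# Only names defined above are used; the comparison mirrors the parametrized test.
if __name__ == "__main__":
    my, py = HashMap(initial_block_size=4), {}
    for fun, *args in _delete_items:
        my_res, _ = _run_operation(my, fun, *args)
        py_res, _ = _run_operation(py, fun, *args)
        assert my_res == py_res  # same value returned (or None if both raised)
    assert set(my.items()) == set(py.items())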
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp
        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch | 25 | 1 |
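# A short usage sketch: in practice the formatter above is selected through the
# `datasets` formatting API rather than instantiated directly. The toy dataset
# is a stand-in; with the "jax" format, indexing returns `jax.numpy` arrays.
from datasets import Dataset
ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax")
row = ds[0]     # {"x": Array([1., 2.], dtype=float32), "y": Array(0, ...)}
batch = ds[:2]  # lists of equal-shaped arrays are consolidated via jnp.stack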
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass
    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # unlike the original implementation, we also retain gradients on this intermediate state
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
A : List[str] = 1E-4
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _lowercase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : str = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(UpperCamelCase_ )
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : str = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase_ )
lowerCamelCase__ : Any = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCamelCase__ : str = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCamelCase__ : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCamelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
lowerCamelCase__ : Optional[int] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCamelCase__ : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : int = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
lowerCamelCase__ : Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCamelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : List[Any] = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
lowerCamelCase__ : Union[str, Any] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCamelCase__ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(UpperCamelCase_ )
.eval()
)
lowerCamelCase__ : Optional[int] = self.default_image_processor
lowerCamelCase__ : Optional[Any] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="pt" , )
lowerCamelCase__ : List[str] = inputs["pixel_values"].to(UpperCamelCase_ )
lowerCamelCase__ : List[Any] = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]]
lowerCamelCase__ : int = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 713 |
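# A hedged end-to-end sketch of the inference flow these tests exercise. The
# checkpoint matches the one used above; the COCO image URL and the
# post-processing call follow the usual MaskFormer image-processor API.
import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# one (height, width) map of predicted class ids per input image
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]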
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
| 5 | 0 |
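# A minimal variant of the fetch step above with explicit error handling; note
# that the canonical request header is "User-Agent", while the script above
# passes "UserAgent" verbatim.
import requests
from fake_useragent import UserAgent
def fetch(url: str) -> str:
    res = requests.get(url, headers={"User-Agent": UserAgent().random}, timeout=10)
    res.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return res.text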
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 |
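# Worked example of the recurrence above on [3, 2, 7, 10]:
#   after 2:  (including, excluding) = (0 + 2,  max(3, 0))  = (2, 3)
#   after 7:  (including, excluding) = (3 + 7,  max(2, 3))  = (10, 3)
#   after 10: (including, excluding) = (3 + 10, max(10, 3)) = (13, 10)
# answer: max(13, 10) = 13, i.e. picking the non-adjacent 3 and 10.
assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13
assert maximum_non_adjacent_sum([-1, -2, -3]) == 0  # taking nothing beats any pick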
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 175 | 1 |
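# A compact numpy sketch of the same batch update run_gradient_descent performs,
# for the linear hypothesis theta[0] + theta[1:] . x; all names here are local
# to the sketch rather than taken from the script above.
import numpy as np
def gradient_descent_step(theta: np.ndarray, x: np.ndarray, y: np.ndarray, lr: float) -> np.ndarray:
    xb = np.hstack([np.ones((x.shape[0], 1)), x])  # bias column plays the theta[0] role
    residual = xb @ theta - y                      # hypothesis minus target, per example
    grad = xb.T @ residual / len(y)                # mean gradient, matching the / m above
    return theta - lr * grad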
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 348 |
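# Tolerance-based sanity sketch for decimal_isolate (these are binary floats,
# so exact equality is avoided); int() truncates toward zero, which is why the
# sign of the fractional part follows the input, as in the -14.789 case above.
assert abs(decimal_isolate(1.53, 2) - 0.53) < 1e-9
assert abs(decimal_isolate(-14.789, 3) + 0.789) < 1e-9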
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 1 |
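# Round-trip sketch: decrypt inverts encrypt for any valid key, and bruteforce
# recovers the plaintext at the true key without knowing it in advance.
message = "WE ARE DISCOVERED. FLEE AT ONCE"
cipher = encrypt(message, 3)
assert decrypt(cipher, 3) == message
assert bruteforce(cipher)[3] == message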
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 519 |
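# Cross-check sketch against the stdlib: Fraction reduces with the same
# Euclidean gcd that the while-loop above carries out by hand.
from fractions import Fraction
num, den = decimal_to_fraction("6.25")
assert (num, den) == (25, 4)
assert Fraction(num, den) == Fraction("6.25")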
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class _UpperCamelCase ( A ):
'''simple docstring'''
def _snake_case ( self : int , _lowerCamelCase : List[Any] ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCamelCase : Optional[int] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(_lowerCamelCase ) )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCamelCase : Optional[int] = [sequences]
__lowerCamelCase : List[str] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_lowerCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(A )
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCamelCase : Tuple=ZeroShotClassificationArgumentHandler() , *_lowerCamelCase : int , **_lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = args_parser
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _snake_case ( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Optional[int]=TruncationStrategy.ONLY_FIRST , **_lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[str] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
__lowerCamelCase : Union[str, Any] = self.tokenizer.eos_token
try:
__lowerCamelCase : Optional[Any] = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , )
except Exception as e:
if "too short" in str(_lowerCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowerCamelCase : int = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , padding=_lowerCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _snake_case ( self : Optional[int] , **_lowerCamelCase : Any ):
'''simple docstring'''
if kwargs.get("""multi_class""" , _lowerCamelCase ) is not None:
__lowerCamelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
__lowerCamelCase : str = {}
if "candidate_labels" in kwargs:
__lowerCamelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
__lowerCamelCase : List[Any] = kwargs["""hypothesis_template"""]
__lowerCamelCase : Tuple = {}
if "multi_label" in kwargs:
__lowerCamelCase : Dict = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Any , _lowerCamelCase : Union[str, List[str]] , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] , ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
pass
elif len(_lowerCamelCase ) == 1 and "candidate_labels" not in kwargs:
__lowerCamelCase : str = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
def _snake_case ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[Any]="This example is {}." ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self._args_parser(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(_lowerCamelCase , _lowerCamelCase ) ):
__lowerCamelCase : int = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_lowerCamelCase ) - 1,
**model_input,
}
def _snake_case ( self : str , _lowerCamelCase : Any ):
'''simple docstring'''
__lowerCamelCase : Tuple = inputs["""candidate_label"""]
__lowerCamelCase : Union[str, Any] = inputs["""sequence"""]
__lowerCamelCase : Any = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowerCamelCase : Optional[int] = self.model(**_lowerCamelCase )
__lowerCamelCase : List[Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _snake_case ( self : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : str=False ):
'''simple docstring'''
__lowerCamelCase : List[Any] = [outputs["""candidate_label"""] for outputs in model_outputs]
__lowerCamelCase : Any = [outputs["""sequence"""] for outputs in model_outputs]
__lowerCamelCase : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
__lowerCamelCase : Union[str, Any] = logits.shape[0]
__lowerCamelCase : Optional[Any] = len(_lowerCamelCase )
__lowerCamelCase : Tuple = N // n
__lowerCamelCase : List[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_lowerCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowerCamelCase : Dict = self.entailment_id
__lowerCamelCase : Optional[Any] = -1 if entailment_id == 0 else 0
__lowerCamelCase : Optional[int] = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowerCamelCase : Dict = np.exp(_lowerCamelCase ) / np.exp(_lowerCamelCase ).sum(-1 , keepdims=_lowerCamelCase )
__lowerCamelCase : Any = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowerCamelCase : int = reshaped_outputs[..., self.entailment_id]
__lowerCamelCase : List[Any] = np.exp(_lowerCamelCase ) / np.exp(_lowerCamelCase ).sum(-1 , keepdims=_lowerCamelCase )
__lowerCamelCase : Dict = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 519 | 1 |
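# Typical front-door usage of the pipeline implemented above; the NLI checkpoint
# named here is a common choice, shown as an example rather than a requirement.
from transformers import pipeline
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
    hypothesis_template="This example is {}.",
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first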
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: Dict = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_lowercase: Any = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
_lowercase: Dict = f"{src_lang}-{tgt_lang}"
_lowercase: Union[str, Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
_lowercase: int = os.path.join(__magic_name__ , "README.md" )
print(f"Generating {path}" )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(__magic_name__ )
# make sure we are under the root of the project
_SCREAMING_SNAKE_CASE : List[str] = Path(__file__).resolve().parent.parent.parent
_SCREAMING_SNAKE_CASE : List[Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = model_name.split('-')
_SCREAMING_SNAKE_CASE : Dict = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 206 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_SCREAMING_SNAKE_CASE : int = get_logger(__name__)
class A :
'''simple docstring'''
lowerCamelCase : Dict = """dummy_data"""
lowerCamelCase : List[Any] = """datasets"""
lowerCamelCase : List[str] = False
def __init__( self : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Union[Version, str] , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[List[Callable]] = None , ):
_lowercase: Dict = 0
_lowercase: Dict = dataset_name
_lowercase: Any = cache_dir
_lowercase: Union[str, Any] = use_local_dummy_data
_lowercase: Tuple = config
# download_callbacks take a single url as input
_lowercase: List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowercase: Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowercase: Optional[int] = str(_UpperCamelCase)
# to be downloaded
_lowercase: Dict = None
_lowercase: Dict = None
@property
def UpperCAmelCase__ ( self : str):
if self._dummy_file is None:
_lowercase: Any = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase__ ( self : Tuple):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name)
@property
def UpperCAmelCase__ ( self : Tuple):
return os.path.join(self.dummy_data_folder , "dummy_data.zip")
def UpperCAmelCase__ ( self : int):
_lowercase: List[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowercase: Dict = cached_path(
_UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCamelCase , force_extract=_UpperCamelCase)
return os.path.join(_UpperCamelCase , self.dummy_file_name)
@property
def UpperCAmelCase__ ( self : Optional[Any]):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def UpperCAmelCase__ ( self : List[str]):
if self._bucket_url is None:
_lowercase: Any = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/"))
return self._bucket_url
@property
def UpperCAmelCase__ ( self : Optional[int]):
# return full path if its a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/").split("/")[:-1])
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : str , *_UpperCamelCase : Optional[Any]):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowercase: int = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowercase: List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCamelCase , _UpperCamelCase):
return self.create_dummy_data_dict(_UpperCamelCase , _UpperCamelCase)
elif isinstance(_UpperCamelCase , (list, tuple)):
return self.create_dummy_data_list(_UpperCamelCase , _UpperCamelCase)
else:
return self.create_dummy_data_single(_UpperCamelCase , _UpperCamelCase)
def UpperCAmelCase__ ( self : List[Any] , _UpperCamelCase : Union[str, Any] , *_UpperCamelCase : List[Any]):
return self.download_and_extract(_UpperCamelCase)
def UpperCAmelCase__ ( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Any):
return self.download_and_extract(_UpperCamelCase)
def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : int):
return path
def UpperCAmelCase__ ( self : Optional[int]):
return {}
def UpperCAmelCase__ ( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any]):
_lowercase: List[Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCamelCase , _UpperCamelCase):
for single_url in single_urls:
download_callback(_UpperCamelCase)
else:
_lowercase: Any = single_urls
download_callback(_UpperCamelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCamelCase , _UpperCamelCase):
_lowercase: List[str] = [os.path.join(_UpperCamelCase , urllib.parse.quote_plus(Path(_UpperCamelCase).name)) for x in single_urls]
else:
_lowercase: Any = single_urls
_lowercase: List[str] = os.path.join(_UpperCamelCase , urllib.parse.quote_plus(Path(_UpperCamelCase).name))
_lowercase: Union[str, Any] = value
# make sure that values are unique
if all(isinstance(_UpperCamelCase , _UpperCamelCase) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
_lowercase: Optional[int] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase__ ( self : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]):
_lowercase: Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowercase: Dict = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , _UpperCamelCase)) for url in data_url)
_lowercase: Tuple = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
_lowercase: List[str] = [data_url[0]] * len(_UpperCamelCase)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCamelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowercase: Optional[Any] = os.path.join(_UpperCamelCase , urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(_UpperCamelCase)
return dummy_data_list
def UpperCAmelCase__ ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict):
for download_callback in self.download_callbacks:
download_callback(_UpperCamelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowercase: Tuple = os.path.join(_UpperCamelCase , urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(_UpperCamelCase) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase__ ( self : Optional[int]):
pass
def UpperCAmelCase__ ( self : List[Any]):
pass
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Tuple):
def _iter_archive_members(_UpperCamelCase : Optional[int]):
# this preserves the order of the members inside the ZIP archive
_lowercase: str = Path(self.dummy_file).parent
_lowercase: Tuple = path.relative_to(_UpperCamelCase)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
_lowercase: Any = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(_UpperCamelCase)
_lowercase: Tuple = Path(_UpperCamelCase)
_lowercase: Optional[int] = _iter_archive_members(_UpperCamelCase) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(_UpperCamelCase).as_posix(), file_path.open("rb")
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Dict):
if not isinstance(_UpperCamelCase , _UpperCamelCase):
_lowercase: List[Any] = [paths]
for path in paths:
if os.path.isfile(_UpperCamelCase):
if os.path.basename(_UpperCamelCase).startswith((".", "__")):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCamelCase):
if os.path.basename(_UpperCamelCase).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(_UpperCamelCase):
if filename.startswith((".", "__")):
continue
yield os.path.join(_UpperCamelCase , _UpperCamelCase)
| 206 | 1 |
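# A small sketch of the archive-walking trick in _iter_archive_members above:
# ZipFile.namelist() preserves member order, so filtering on a path prefix
# yields the members "inside" a folder without extracting anything. The paths
# here are illustrative.
from zipfile import ZipFile
def iter_members(zip_path: str, prefix: str):
    with ZipFile(zip_path) as zf:
        for member in zf.namelist():
            if member.startswith(prefix):
                yield member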
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class snake_case__ :
a_ = XGLMConfig
a_ = {}
a_ = "gelu"
def __init__( self : Optional[int] , _A : Dict , _A : str=14 , _A : Tuple=7 , _A : Optional[Any]=True , _A : List[Any]=True , _A : List[str]=True , _A : List[Any]=99 , _A : Dict=32 , _A : Optional[int]=2 , _A : Dict=4 , _A : int=37 , _A : Any="gelu" , _A : int=0.1 , _A : Dict=0.1 , _A : Optional[int]=5_12 , _A : Union[str, Any]=0.02 , ) -> List[str]:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : int = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Tuple = use_input_mask
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Optional[int] = d_model
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Any = ffn_dim
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : Optional[int] = activation_dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : int = 1
def A ( self : Optional[Any] ) -> int:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def A ( self : Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase_ : Tuple = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Optional[Any] = self.get_config()
UpperCAmelCase_ : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A ( self : Union[str, Any] ) -> Dict:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_A , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
a_ = False
a_ = False
a_ = False
    def setUp( self ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def test_resize_token_embeddings( self ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase):
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        inputs = tokenizer(sentences , return_tensors='''tf''' , padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 541 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 3_84, 24, 24] , is_hybrid=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
a_ = False
a_ = False
a_ = False
    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : Tuple ) -> Optional[Any]:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase):
    def test_inference_depth_estimation( self ):
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 3_84, 3_84) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , expected_slice , atol=1e-4 ) )
| 541 | 1 |
'''simple docstring'''
import numpy as np
SQUARE = [
    ["""a""", """b""", """c""", """d""", """e"""],
    ["""f""", """g""", """h""", """i""", """k"""],
    ["""l""", """m""", """n""", """o""", """p"""],
    ["""q""", """r""", """s""", """t""", """u"""],
    ["""v""", """w""", """x""", """y""", """z"""],
]
class BifidCipher:
    def __init__( self ):
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers( self , letter ):
        # 1-based (row, column) coordinates of `letter` in the Polybius square
        index1, index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1 , index2 ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message ):
        message = message.lower()
        message = message.replace(""" """ , """""" )
        message = message.replace("""j""" , """i""" )
        # collect the (row, column) coordinates of every plaintext letter
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        # read all rows followed by all columns, then re-pair the digits
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message ):
        message = message.lower()
        message = message.replace(""" """ , """""" )
        # interleave the ciphertext coordinates, then undo the row/column split
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
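# A minimal round-trip sketch for the Bifid cipher above ("test message" is an
# arbitrary example; encode() strips spaces and folds "j" into "i" first):
if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("test message")
    assert cipher.decode(ciphertext ) == "testmessage"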
| 705 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
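        # e.g. self.sp_model.PieceToId(",") == 3, and 3 + self.fairseq_offset == 4 matches the fairseq id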
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else """eng_Latn"""
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ):
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
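    # With the default (non-legacy) behaviour, the special-token layout built below is
    #   single sequence: [src_lang_code] X [eos]
    # which is exactly what set_src_lang_special_tokens() installs into prefix/suffix_tokens.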
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
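    # (SentencePiece reserves id 0 for its own "<unk>", so a 0 from PieceToId means the
    # piece is out of vocabulary and is routed to this tokenizer's unk_token_id instead.)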
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def UpperCAmelCase__ ( self , _lowercase , _lowercase = "eng_Latn" , _lowercase = None , _lowercase = "fra_Latn" , **_lowercase , ):
lowerCAmelCase_ : Optional[int] = src_lang
lowerCAmelCase_ : Dict = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ):
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 440 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
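# Import resolution is deferred: `_LazyModule` replaces this package module in
# `sys.modules`, so a backend-guarded submodule registered in `_import_structure`
# above is only imported the first time one of its names is actually accessed.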
| 673 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']
    def setup( self ):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode( self , text , labels ):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def decode( self , outputs ):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
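# A minimal usage sketch (hypothetical labels; `PipelineTool.__call__` chains
# encode -> forward -> decode, so the tool can simply be called):
#   classifier = TextClassificationTool()
#   classifier("This is a wonderful movie!", labels=["positive", "negative"])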
| 673 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
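        # `replicate` copies the param tree onto every local device, while `shard`
        # splits the leading batch dimension across devices for the pmapped call below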
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 712 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config( PretrainedConfig ):
    """simple docstring"""
    model_type = """umt5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size( self ):
        return self.d_model
    @property
    def num_attention_heads( self ):
        return self.num_heads
    @property
    def num_hidden_layers( self ):
        return self.num_layers
class UMT5OnnxConfig( OnnxSeq2SeqConfigWithPast ):
    """simple docstring"""
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ):
        return 13
    @property
    def atol_for_validation( self ):
        return 5E-4
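# A minimal usage sketch (illustrative values, not a released checkpoint):
#   config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
#   config.hidden_size  # -> 256, aliased to d_model via the property above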
| 608 | 0 |