# Dataset viewer schema (recovered from the garbled column header):
#   code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) |
#   style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" FNet tokenizer, backed by HuggingFace's `tokenizers` library and based on Unigram/SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # An FNet sequence has the format: `[CLS] X [SEP]` or `[CLS] A [SEP] B [SEP]`
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Token type ids are 0 for the first sequence and 1 for an optional second sequence.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
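
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving this fast tokenizer through the public
# `transformers` API; assumes the `google/fnet-base` checkpoint is reachable.
#
#     from transformers import FNetTokenizerFast
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     enc = tokenizer("hello world", "second segment")
#     print(enc["input_ids"])       # [CLS] hello world [SEP] second segment [SEP]
#     print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second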
# --- dataset row 1 | code_codestyle: 101 | style_context follows ---
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encoding,
        # and to get both GPT-2 and RoBERTa to work at the same time (mostly an issue
        # of adding a space before the string).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: padding to max_length must raise because GPT-2 has no pad token
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        bos_token = "bos"
        bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])

        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(text)
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])

# --- dataset row 1 | style_context_codestyle: 162 | label: 0 ---
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
# --- dataset row 2 | code_codestyle: 460 | style_context follows ---
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines.

    Note: the field annotations below were obfuscated in the source dump (both read `42`);
    they are restored here following the standard diffusers pipeline-output pattern.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
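
# --- Usage sketch (not part of the original module) ---
# Hedged illustration of the conditional import above: the pipeline only becomes importable
# when both `transformers` and `torch` are installed, so callers typically guard the same way.
# The checkpoint name is an assumption.
#
#     from diffusers.utils import is_torch_available, is_transformers_available
#
#     if is_transformers_available() and is_torch_available():
#         from diffusers import SemanticStableDiffusionPipeline
#         pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")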
# --- dataset row 2 | style_context_codestyle: 460 | label: 1 ---
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """
    Constructs an MGP-STR processor which wraps an image processor and MGP-STR tokenizers into a single processor.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # `sequences` holds the logits of the three decoding heads; keep, per sample,
        # the decoded string from whichever head is most confident.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # confidence of a string is the cumulative product of its per-step max probabilities
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
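
# --- Usage sketch (not part of the original module) ---
# Hedged example of `batch_decode` on dummy logits. The vocabulary sizes (38 characters,
# 50257 for the GPT-2 BPE head, 30522 for the BERT WordPiece head) and the sequence
# length of 27 are assumptions about the MGP-STR heads, as is the checkpoint name.
#
#     import torch
#     from transformers import MgpstrProcessor
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     char_logits = torch.randn(1, 27, 38)
#     bpe_logits = torch.randn(1, 27, 50257)
#     wp_logits = torch.randn(1, 27, 30522)
#     out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#     print(out["generated_text"], out["scores"])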
# --- dataset row 3 | code_codestyle: 23 | style_context follows ---
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
# --- dataset row 3 | style_context_codestyle: 193 | label: 0 ---
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
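
# --- Usage sketch (not part of the original module) ---
# Hedged example of how these helpers are typically driven from an `accelerate`-launched
# training script. Reaching the plugin through `accelerator.state.fsdp_plugin` is an
# assumption about the surrounding API; treat this as a sketch, not the canonical usage.
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()  # launched with an FSDP config
#     model, optimizer = accelerator.prepare(model, optimizer)
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoints/step_100")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "checkpoints/step_100")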
# --- dataset row 4 | code_codestyle: 221 | style_context follows ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
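
# --- Usage sketch (not part of the original module) ---
# With `_LazyModule`, heavy submodules are only imported on first attribute access, so the
# top-level `import transformers` stays cheap. A typical (hedged) consumer looks like:
#
#     from transformers import ViTMSNConfig, ViTMSNModel  # resolved lazily through this __init__
#
#     config = ViTMSNConfig()
#     model = ViTMSNModel(config)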
# --- dataset row 4 | style_context_codestyle: 221 | label: 1 ---
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the current min and max of the remaining items.

    Note: despite the name kept from the source, this is a min/max selection scheme,
    not the classic divide-and-conquer merge sort.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
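
# --- Usage sketch (not part of the original module) ---
# Non-interactive example of the function above:
#
#     >>> merge_sort([5, 2, 9, 1])
#     [1, 2, 5, 9]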
# --- dataset row 5 | code_codestyle: 104 | style_context follows ---
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


# NOTE: the original class name was lost to identifier obfuscation in the source dump;
# "SegmentationImageProcessor" is a placeholder restored from context.
class SegmentationImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Converts model outputs into per-pixel semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
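
# --- Usage sketch (not part of the original module) ---
# Hedged example with a random image; `SegmentationImageProcessor` is the placeholder name
# used above for the obfuscated original class.
#
#     import numpy as np
#
#     processor = SegmentationImageProcessor()
#     image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)
#     batch = processor(image, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop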
# --- dataset row 5 | style_context_codestyle: 666 | label: 0 ---
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
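
# --- Usage sketch (not part of the original script) ---
# Hypothetical invocation (the paths are placeholders):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc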
# --- dataset row 6 | code_codestyle: 475 | style_context follows ---
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : List[str] = VQModel
__A : Any = "sample"
@property
def __snake_case ( self : int , snake_case__ : int=(3_2, 3_2) ):
'''simple docstring'''
lowercase :Optional[int] = 4
lowercase :Tuple = 3
lowercase :List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return (3, 3_2, 3_2)
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowercase :Dict = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case__ )
lowercase :List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(snake_case__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowercase :List[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase :int = image.to(snake_case__ )
with torch.no_grad():
lowercase :str = model(snake_case__ ).sample
lowercase :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase :Tuple = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
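# Minimal standalone sketch (assumes the diffusers VQModel API exercised above):
# building the same tiny model directly and running a forward pass.
import torch
from diffusers import VQModel

tiny = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
reconstruction = tiny(torch.randn(4, 3, 32, 32)).sample  # same shape as the input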
| 475 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class snake_case_ ( _lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase = '''longformer'''
def __init__( self : Tuple , __magic_name__ : Union[List[int], int] = 512 , __magic_name__ : int = 2 , __magic_name__ : int = 1 , __magic_name__ : int = 0 , __magic_name__ : int = 2 , __magic_name__ : int = 3_0522 , __magic_name__ : int = 768 , __magic_name__ : int = 12 , __magic_name__ : int = 12 , __magic_name__ : int = 3072 , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 512 , __magic_name__ : int = 2 , __magic_name__ : float = 0.02 , __magic_name__ : float = 1e-12 , __magic_name__ : bool = False , **__magic_name__ : str , ) -> List[Any]:
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase_ : Optional[int] = attention_window
lowerCamelCase_ : str = sep_token_id
lowerCamelCase_ : List[str] = bos_token_id
lowerCamelCase_ : Tuple = eos_token_id
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : Dict = num_hidden_layers
lowerCamelCase_ : Optional[int] = num_attention_heads
lowerCamelCase_ : List[Any] = hidden_act
lowerCamelCase_ : List[Any] = intermediate_size
lowerCamelCase_ : List[str] = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : Any = max_position_embeddings
lowerCamelCase_ : List[Any] = type_vocab_size
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : Optional[int] = layer_norm_eps
lowerCamelCase_ : int = onnx_export
class snake_case_ ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , __magic_name__ : "PretrainedConfig" , __magic_name__ : str = "default" , __magic_name__ : "List[PatchingSpec]" = None ) -> Union[str, Any]:
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ : Optional[int] = True
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase_ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
lowerCamelCase_ : Union[str, Any] = super().outputs
if self.task == "default":
lowerCamelCase_ : Optional[Any] = {0: "batch"}
return outputs
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return max(super().default_onnx_opset , 14 )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : "PreTrainedTokenizerBase" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
lowerCamelCase_ : Any = super().generate_dummy_inputs(
preprocessor=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
import torch
# for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase_ : List[Any] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
lowerCamelCase_ : Optional[int] = 1
return inputs
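# Standalone sketch (illustrative only) of the global-attention pattern created
# in generate_dummy_inputs above: all zeros, then every second token made global.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every second token attends globally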
| 488 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False , )-> str:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = False
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
UpperCamelCase = TaConfig(
vocab_size=UpperCAmelCase_ , d_model=UpperCAmelCase_ , num_heads=UpperCAmelCase_ , d_kv=UpperCAmelCase_ , d_ff=UpperCAmelCase_ , dropout_rate=UpperCAmelCase_ , feed_forward_proj=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , )
UpperCamelCase = nn.ModuleList()
for lyr_num in range(UpperCAmelCase_ ):
UpperCamelCase = TaBlock(UpperCAmelCase_ )
self.encoders.append(UpperCAmelCase_ )
UpperCamelCase = TaLayerNorm(UpperCAmelCase_ )
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str )-> List[Any]:
"""simple docstring"""
UpperCamelCase = self.token_embedder(UpperCAmelCase_ )
UpperCamelCase = encoder_input_tokens.shape[1]
UpperCamelCase = torch.arange(UpperCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCAmelCase_ )
UpperCamelCase = self.dropout_pre(UpperCAmelCase_ )
# invert the attention mask
UpperCamelCase = encoder_input_tokens.size()
UpperCamelCase = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ )
for lyr in self.encoders:
UpperCamelCase = lyr(UpperCAmelCase_ , UpperCAmelCase_ )[0]
UpperCamelCase = self.layer_norm(UpperCAmelCase_ )
return self.dropout_post(UpperCAmelCase_ ), encoder_inputs_mask
| 554 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
_A = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_A = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(__lowercase )
DownloadCommand.register_subcommand(__lowercase )
EnvironmentCommand.register_subcommand(__lowercase )
RunCommand.register_subcommand(__lowercase )
ServeCommand.register_subcommand(__lowercase )
UserCommands.register_subcommand(__lowercase )
AddNewModelCommand.register_subcommand(__lowercase )
AddNewModelLikeCommand.register_subcommand(__lowercase )
LfsCommands.register_subcommand(__lowercase )
PTtoTFCommand.register_subcommand(__lowercase )
# Let's go
_A = parser.parse_args()
if not hasattr(__lowercase , 'func' ):
parser.print_help()
exit(1 )
# Run
_A = args.func(__lowercase )
service.run()
if __name__ == "__main__":
main()
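# Hypothetical sketch (MyCommand is not a real transformers-cli command) of the
# register_subcommand pattern that each command class above implements.
from argparse import ArgumentParser

class MyCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("my-command", help="demo subcommand")
        sub.set_defaults(func=lambda args: MyCommand())

    def run(self):
        print("running my-command")

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
MyCommand.register_subcommand(subparsers)
args = parser.parse_args(["my-command"])
args.func(args).run()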
| 704 |
"""simple docstring"""
# Using DFS to find an Eulerian path
def _snake_case ( _snake_case : str , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Optional[Any]=None ) -> int:
'''simple docstring'''
_A = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_A , _A = True, True
_A = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = 0
_A = -1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_A = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _snake_case ( _snake_case : Tuple , _snake_case : Any ) -> Tuple:
'''simple docstring'''
_A = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_A , _A = check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
_A = 1
if check == 2:
_A = odd_node
print('graph has an Euler path' )
if check == 1:
print('graph has an Euler cycle' )
_A = dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def _snake_case ( ) -> str:
'''simple docstring'''
_A = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_A = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_A = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_A = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_A = {
1: [],
2: []
# all degrees are zero
}
_A = 10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
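# Quick worked check (standalone, not part of the original module) of the
# degree-parity rule behind check_circuit_or_path: an Eulerian circuit needs
# zero odd-degree vertices, an Eulerian path exactly two.
demo_graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd_vertices = [v for v, nbrs in demo_graph.items() if len(nbrs) % 2 == 1]
assert odd_vertices == [1, 5]  # two odd vertices -> Euler path, no circuit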
| 505 | 0 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """
    Reverse every word that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 341 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,**A ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" ,A ,)
super().__init__(*A ,**A )
| 341 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = 42
class UpperCamelCase_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
@register_to_config
def __init__( self , a = 32 , a = 64 , a = 20 , a = 7_68 , a=77 , a=4 , a = 0.0 , a = "silu" , a = None , a = None , a = "linear" , a = "prd" , a = None , a = None , a = None , ) -> Union[str, Any]:
super().__init__()
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = num_attention_heads * attention_head_dim
snake_case_ = additional_embeddings
snake_case_ = time_embed_dim or inner_dim
snake_case_ = embedding_proj_dim or embedding_dim
snake_case_ = clip_embed_dim or embedding_dim
snake_case_ = Timesteps(UpperCamelCase_ , UpperCamelCase_ , 0 )
snake_case_ = TimestepEmbedding(UpperCamelCase_ , UpperCamelCase_ , out_dim=UpperCamelCase_ , act_fn=UpperCamelCase_ )
snake_case_ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if embedding_proj_norm_type is None:
snake_case_ = None
elif embedding_proj_norm_type == "layer":
snake_case_ = nn.LayerNorm(UpperCamelCase_ )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
snake_case_ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if encoder_hid_proj_type is None:
snake_case_ = None
elif encoder_hid_proj_type == "linear":
snake_case_ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
snake_case_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase_ ) )
if added_emb_type == "prd":
snake_case_ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase_ ) )
elif added_emb_type is None:
snake_case_ = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
snake_case_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn='gelu' , attention_bias=UpperCamelCase_ , )
for d in range(UpperCamelCase_ )
] )
if norm_in_type == "layer":
snake_case_ = nn.LayerNorm(UpperCamelCase_ )
elif norm_in_type is None:
snake_case_ = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
snake_case_ = nn.LayerNorm(UpperCamelCase_ )
snake_case_ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
snake_case_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
snake_case_ = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase_ , persistent=UpperCamelCase_ )
snake_case_ = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
snake_case_ = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = {}
def fn_recursive_add_processors(a , a , a ):
if hasattr(UpperCamelCase_ , 'set_processor' ):
snake_case_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return processors
def _UpperCamelCase ( self , a ) -> List[str]:
snake_case_ = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase_ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(a , a , a ):
if hasattr(UpperCamelCase_ , 'set_processor' ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
module.set_processor(UpperCamelCase_ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _UpperCamelCase ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
def _UpperCamelCase ( self , a , a , a , a = None , a = None , a = True , ) -> Dict:
snake_case_ = hidden_states.shape[0]
snake_case_ = timestep
if not torch.is_tensor(UpperCamelCase_ ):
snake_case_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case_ = timesteps * torch.ones(UpperCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
snake_case_ = self.time_proj(UpperCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case_ = timesteps_projected.to(dtype=self.dtype )
snake_case_ = self.time_embedding(UpperCamelCase_ )
if self.embedding_proj_norm is not None:
snake_case_ = self.embedding_proj_norm(UpperCamelCase_ )
snake_case_ = self.embedding_proj(UpperCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case_ = self.encoder_hidden_states_proj(UpperCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
snake_case_ = self.proj_in(UpperCamelCase_ )
snake_case_ = self.positional_embedding.to(hidden_states.dtype )
snake_case_ = []
snake_case_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
snake_case_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
snake_case_ = hidden_states[:, None, :]
snake_case_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case_ = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase_ , -1 , -1 )
additional_embeds.append(UpperCamelCase_ )
snake_case_ = torch.cat(
UpperCamelCase_ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
snake_case_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case_ = F.pad(
UpperCamelCase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case_ = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case_ = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
snake_case_ = F.pad(UpperCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
snake_case_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
snake_case_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
snake_case_ = self.norm_in(UpperCamelCase_ )
for block in self.transformer_blocks:
snake_case_ = block(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
snake_case_ = self.norm_out(UpperCamelCase_ )
if self.prd_embedding is not None:
snake_case_ = hidden_states[:, -1]
else:
snake_case_ = hidden_states[:, additional_embeddings_len:]
snake_case_ = self.proj_to_clip_embeddings(UpperCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase_ )
def _UpperCamelCase ( self , a ) -> List[str]:
snake_case_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
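# Small standalone sketch (illustrative only) of the causal mask built in
# __init__ above: -10000.0 strictly above the diagonal suppresses attention
# to future positions once added to the logits before softmax.
import torch

n = 4
causal = torch.full([n, n], -10000.0)
causal.triu_(1)  # zeros on and below the diagonal, -10000.0 above it
assert causal[0].tolist() == [0.0, -10000.0, -10000.0, -10000.0]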
| 710 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 607 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase_ ( snake_case_ ):
A_ = "unispeech-sat"
def __init__( self : str , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Optional[Any]=1E-5 , __lowerCamelCase : str="group" , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __lowerCamelCase : Dict=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]=128 , __lowerCamelCase : Any=16 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=0.0_5 , __lowerCamelCase : Dict=10 , __lowerCamelCase : str=2 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Any=320 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=100 , __lowerCamelCase : Tuple=256 , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict="mean" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Any=256 , __lowerCamelCase : Dict=(512, 512, 512, 512, 1500) , __lowerCamelCase : Optional[int]=(5, 3, 3, 1, 1) , __lowerCamelCase : int=(1, 2, 3, 1, 1) , __lowerCamelCase : Any=512 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=2 , __lowerCamelCase : str=504 , **__lowerCamelCase : Optional[Any] , ):
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
snake_case__ : Tuple = hidden_size
snake_case__ : Dict = feat_extract_norm
snake_case__ : List[str] = feat_extract_activation
snake_case__ : Optional[Any] = list(A_ )
snake_case__ : Optional[int] = list(A_ )
snake_case__ : Optional[Any] = list(A_ )
snake_case__ : Dict = conv_bias
snake_case__ : str = num_conv_pos_embeddings
snake_case__ : Optional[Any] = num_conv_pos_embedding_groups
snake_case__ : Any = len(self.conv_dim )
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : List[str] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Any = num_attention_heads
snake_case__ : Tuple = hidden_dropout
snake_case__ : Any = attention_dropout
snake_case__ : Optional[int] = activation_dropout
snake_case__ : Tuple = feat_proj_dropout
snake_case__ : Any = final_dropout
snake_case__ : Tuple = layerdrop
snake_case__ : Tuple = layer_norm_eps
snake_case__ : str = initializer_range
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[Any] = num_clusters
snake_case__ : int = do_stable_layer_norm
snake_case__ : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ : Optional[Any] = apply_spec_augment
snake_case__ : List[Any] = mask_time_prob
snake_case__ : Optional[int] = mask_time_length
snake_case__ : List[str] = mask_time_min_masks
snake_case__ : Optional[int] = mask_feature_prob
snake_case__ : Any = mask_feature_length
snake_case__ : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case__ : List[str] = num_codevectors_per_group
snake_case__ : Tuple = num_codevector_groups
snake_case__ : List[str] = contrastive_logits_temperature
snake_case__ : Any = feat_quantizer_dropout
snake_case__ : Tuple = num_negatives
snake_case__ : Optional[int] = codevector_dim
snake_case__ : Tuple = proj_codevector_dim
snake_case__ : Dict = diversity_loss_weight
# ctc loss
snake_case__ : int = ctc_loss_reduction
snake_case__ : Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ : Union[str, Any] = list(A_ )
snake_case__ : Union[str, Any] = list(A_ )
snake_case__ : int = list(A_ )
snake_case__ : List[Any] = xvector_output_dim
@property
def _lowerCAmelCase ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
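# Quick standalone check (illustrative only) of the property above: the ratio of
# input samples to logits is the product of the conv strides, i.e. the feature
# encoder's total downsampling factor.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320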
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Optional[int] = TaTokenizerFast
lowerCAmelCase : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 3 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
lowercase__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
lowercase__ : Optional[int] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = BartTokenizer
def __init__( self : List[Any] , __lowercase : Optional[int]=None , __lowercase : Any=None , __lowercase : Dict=None , __lowercase : Any="replace" , __lowercase : Tuple="<s>" , __lowercase : Union[str, Any]="</s>" , __lowercase : Optional[Any]="</s>" , __lowercase : Tuple="<s>" , __lowercase : Optional[Any]="<unk>" , __lowercase : Dict="<pad>" , __lowercase : Any="<mask>" , __lowercase : Tuple=False , __lowercase : List[str]=True , **__lowercase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowercase ) != add_prefix_space:
snake_case_ = getattr(__lowercase , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**__lowercase )
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = "post_processor"
snake_case_ = getattr(self.backend_tokenizer , __lowercase , __lowercase )
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state["sep"] )
if "cls" in state:
snake_case_ = tuple(state["cls"] )
snake_case_ = False
if state.get("add_prefix_space" , __lowercase ) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get("trim_offsets" , __lowercase ) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(__lowercase , state.pop("type" ) )
snake_case_ = component_class(**__lowercase )
setattr(self.backend_tokenizer , __lowercase , __lowercase )
@property
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self : Any , __lowercase : Optional[Any] ):
"""simple docstring"""
snake_case_ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else value
snake_case_ = value
def snake_case__ ( self : Optional[int] , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def snake_case__ ( self : Dict , *__lowercase : int , **__lowercase : Optional[Any] ):
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowercase , **__lowercase )
def snake_case__ ( self : Optional[Any] , __lowercase : str , __lowercase : Optional[str] = None ):
"""simple docstring"""
snake_case_ = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def snake_case__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int=None ):
"""simple docstring"""
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
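# Usage sketch (assumes network access to the Hugging Face Hub) showing the
# special-token layout produced by build_inputs_with_special_tokens above for
# a sequence pair: <s> A </s> </s> B </s>.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
pair_ids = tok("Hello", "world")["input_ids"]
assert pair_ids[0] == tok.bos_token_id and pair_ids[-1] == tok.eos_token_id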
| 139 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase__ ( _A , _A , _A , _A=1024 ):
'''simple docstring'''
snake_case_ , snake_case_ = [], []
snake_case_ = list(zip(_A , _A ) )
snake_case_ , snake_case_ = sorted_examples[0]
def is_too_big(_A ):
return tok(_A , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
snake_case_ = new_src + " " + src
snake_case_ = new_tgt + " " + tgt
if is_too_big(_A ) or is_too_big(_A ): # can't fit, finalize example
finished_src.append(_A )
finished_tgt.append(_A )
snake_case_ , snake_case_ = src, tgt
else: # can fit, keep adding
snake_case_ , snake_case_ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_A )
finished_tgt.append(_A )
return finished_src, finished_tgt
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = Path(_A )
save_path.mkdir(exist_ok=_A )
for split in ["train"]:
snake_case_ , snake_case_ = data_dir / f"{split}.source", data_dir / f"{split}.target"
snake_case_ = [x.rstrip() for x in Path(_A ).open().readlines()]
snake_case_ = [x.rstrip() for x in Path(_A ).open().readlines()]
snake_case_ , snake_case_ = pack_examples(_A , _A , _A , _A )
print(f"packed {split} split from {len(_A )} examples -> {len(_A )}." )
Path(save_path / f"{split}.source" ).open("w" ).write("\n".join(_A ) )
Path(save_path / f"{split}.target" ).open("w" ).write("\n".join(_A ) )
for split in ["val", "test"]:
snake_case_ , snake_case_ = data_dir / f"{split}.source", data_dir / f"{split}.target"
shutil.copyfile(_A , save_path / f"{split}.source" )
shutil.copyfile(_A , save_path / f"{split}.target" )
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_A , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_A , default=128 )
parser.add_argument("--data_dir" , type=_A )
parser.add_argument("--save_path" , type=_A )
snake_case_ = parser.parse_args()
snake_case_ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_A , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
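# Hypothetical invocation (file and directory names are placeholders, not from
# the original script):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed
#
# Packing greedily concatenates consecutive (source, target) pairs until either
# side would exceed max_seq_len tokens, shrinking the train split while copying
# the val/test splits through unchanged.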
| 139 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class A ( __UpperCAmelCase ):
__snake_case = 'gpt_neox_japanese'
def __init__( self, UpperCamelCase__=3_2000, UpperCamelCase__=2560, UpperCamelCase__=32, UpperCamelCase__=32, UpperCamelCase__=4, UpperCamelCase__="gelu", UpperCamelCase__=1.00, UpperCamelCase__=1_0000, UpperCamelCase__=2048, UpperCamelCase__=0.02, UpperCamelCase__=1E-5, UpperCamelCase__=True, UpperCamelCase__=3_1996, UpperCamelCase__=3_1999, UpperCamelCase__=0.1, UpperCamelCase__=0.0, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_multiple_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = rotary_pct
lowerCAmelCase_ = rotary_emb_base
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = hidden_dropout
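# Minimal sketch (written against the upstream class name and keyword arguments,
# GPTNeoXJapaneseConfig, which are collapsed in this copy):
from transformers import GPTNeoXJapaneseConfig

tiny = GPTNeoXJapaneseConfig(
    vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4
)
assert tiny.model_type == "gpt_neox_japanese"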
| 431 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=10, UpperCamelCase__=3, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=32, UpperCamelCase__=5, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=10, UpperCamelCase__=0.02, UpperCamelCase__=0.9, UpperCamelCase__=None, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = tubelet_size
lowerCAmelCase_ = num_frames
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = mask_ratio
lowerCAmelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCAmelCase_ = (image_size // patch_size) ** 2
lowerCAmelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCAmelCase_ = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.batch_size, -1 ).bool()
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )
# model only returns predictions for masked patches
lowerCAmelCase_ = mask.sum().item()
lowerCAmelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__snake_case = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.model_tester.batch_size, -1 ).bool()
lowerCAmelCase_ = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__, nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCAmelCase_ = len(UpperCamelCase__ )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
self.assertEqual(out_len + 1, len(UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def __UpperCamelCase ( ):
lowerCAmelCase_ = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCAmelCase_ = np.load(_A )
return list(_A )
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
lowerCAmelCase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
lowerCAmelCase_ = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]], device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCAmelCase_ = torch.tensor([0.5_142], device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.6_469], device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
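# Standalone sketch (illustrative only) of the boolean mask construction used
# in the tests above: one mask with the first num_masks positions set, repeated
# over the batch.
import torch

seq_length, num_masks, batch_size = 10, 6, 2
mask = torch.ones(num_masks)
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)
assert bool_masked_pos.sum().item() == batch_size * num_masks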
| 431 | 1 |
'''simple docstring'''
from torch import nn
def A_ ( __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
| 704 |
'''simple docstring'''
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__A : Tuple = generate_pascal_triangle(__SCREAMING_SNAKE_CASE )
for row_idx in range(__SCREAMING_SNAKE_CASE ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, one element at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
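# The values follow directly from the recurrence (each interior element is the
# sum of the two elements above it), e.g.:
#     generate_pascal_triangle(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]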
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row `current_row_idx` from the rows already stored in `triangle`."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    """Set one interior element as the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle by deriving each full row from the previous one."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
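# Design note: the optimized variant computes only the first ceil(row_len / 2)
# entries of each row from the previous row, then mirrors them, since every
# row of Pascal's triangle is symmetric. For example:
#     generate_pascal_triangle_optimized(4)[-1] -> [1, 3, 3, 1]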
def benchmark() -> None:
    """Benchmark both triangle generators for several input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 499 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
def __init__( self : Tuple ) -> List[str]:
"""simple docstring"""
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate an image processor class from a pretrained checkpoint."""
_lowerCamelCase : int =kwargs.pop('config' , lowercase_ )
_lowerCamelCase : Dict =kwargs.pop('trust_remote_code' , lowercase_ )
_lowerCamelCase : Tuple =True
_lowerCamelCase , _lowerCamelCase : str =ImageProcessingMixin.get_image_processor_dict(lowercase_ , **lowercase_ )
_lowerCamelCase : int =config_dict.get('image_processor_type' , lowercase_ )
_lowerCamelCase : Union[str, Any] =None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase : Tuple =config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_lowerCamelCase : Union[str, Any] =config_dict.pop('feature_extractor_type' , lowercase_ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_lowerCamelCase : List[Any] =feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase : Tuple =config_dict['auto_map']['AutoFeatureExtractor']
_lowerCamelCase : Union[str, Any] =feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase : str =AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
# It could be in `config.image_processor_type`
_lowerCamelCase : int =getattr(lowercase_ , 'image_processor_type' , lowercase_ )
if hasattr(lowercase_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_lowerCamelCase : List[Any] =config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_lowerCamelCase : Any =image_processor_class_from_name(lowercase_ )
_lowerCamelCase : Optional[Any] =image_processor_auto_map is not None
_lowerCamelCase : str =image_processor_class is not None or type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING
_lowerCamelCase : List[Any] =resolve_trust_remote_code(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if has_remote_code and trust_remote_code:
_lowerCamelCase : str =get_class_from_dynamic_module(
lowercase_ , lowercase_ , **lowercase_ )
_lowerCamelCase : Tuple =kwargs.pop('code_revision' , lowercase_ )
if os.path.isdir(lowercase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowercase_ , **lowercase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowercase_ , **lowercase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING:
_lowerCamelCase : Optional[int] =IMAGE_PROCESSOR_MAPPING[type(lowercase_ )]
return image_processor_class.from_dict(lowercase_ , **lowercase_ )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for the given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
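# Usage sketch for the resolver above (the checkpoint name is illustrative and
# requires network access or a local cache):
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     type(image_processor).__name__  # 'ViTImageProcessor', per the 'vit' entry in the mapping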
| 464 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowercase_ ) , 1002 )
def lowerCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
_lowerCamelCase : List[Any] =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : int =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCamelCase : List[str] =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int =self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_lowerCamelCase : List[Any] =self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_lowerCamelCase : int =tempfile.mkdtemp()
_lowerCamelCase : List[str] =tokenizer_r.save_pretrained(lowercase_ )
_lowerCamelCase : int =tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCamelCase : Optional[Any] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
_lowerCamelCase : int =tokenizer_r.from_pretrained(lowercase_ )
_lowerCamelCase : Any =tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase : Dict =tempfile.mkdtemp()
_lowerCamelCase : int =tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
_lowerCamelCase : Optional[Any] =tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
_lowerCamelCase : int =tokenizer_r.from_pretrained(lowercase_ )
_lowerCamelCase : List[Any] =tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase : str =tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] =tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
_lowerCamelCase : Any =tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase : str =tokenizer_r.from_pretrained(lowercase_ )
_lowerCamelCase : List[str] =tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@cached_property
    def big_tokenizer(self):
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def lowerCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Any =self.get_tokenizer()
_lowerCamelCase : Optional[int] =self.get_rust_tokenizer()
_lowerCamelCase : Tuple ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Any =tokenizer.tokenize(lowercase_ )
_lowerCamelCase : List[str] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : int =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Dict =self.get_rust_tokenizer()
_lowerCamelCase : Optional[int] =tokenizer.encode(lowercase_ )
_lowerCamelCase : Optional[Any] =rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] ='Hello World!'
_lowerCamelCase : Union[str, Any] =[0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_lowerCamelCase : List[str] =[
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] ={'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
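# Outside the test harness, the slow tokenizer behaves as the integration
# tests above assert (network access assumed for the pretrained files):
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     tok.encode("Hello World!")  # [0, 35378, 6661, 38, 2]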
| 464 | 1 |
'''Pure-Python implementation of stooge sort.'''


def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
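# Stooge sort is a deliberately inefficient teaching algorithm: the three
# overlapping recursive calls on 2/3 of the range give O(n^(log 3 / log 1.5))
# ~= O(n^2.71) time. A small worked example:
#     stooge_sort([2, 4, 5, 3, 1])  # -> [1, 2, 3, 4, 5]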
| 715 |

'''Project Euler Problem 50: https://projecteuler.net/problem=50

Which prime, below one million, can be written as the sum of the most
consecutive primes?
'''
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
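# Sanity check from the problem statement: below one thousand, the longest run
# of consecutive primes summing to a prime has 21 terms and totals 953, so
#     solution(1_000)  # -> 953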
if __name__ == "__main__":
print(F'''{solution() = }''')
| 461 | 0 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __snake_case ( __lowerCAmelCase ):
"""simple docstring"""
_lowerCamelCase = '''data2vec-audio'''
def __init__( self , __lowerCamelCase=32 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase="gelu" , __lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase=False , __lowerCamelCase=16 , __lowerCamelCase=19 , __lowerCamelCase=5 , __lowerCamelCase=0.0_5 , __lowerCamelCase=10 , __lowerCamelCase=2 , __lowerCamelCase=0.0 , __lowerCamelCase=10 , __lowerCamelCase=0 , __lowerCamelCase="sum" , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=256 , __lowerCamelCase=(512, 512, 512, 512, 1500) , __lowerCamelCase=(5, 3, 3, 1, 1) , __lowerCamelCase=(1, 2, 3, 1, 1) , __lowerCamelCase=512 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=False , __lowerCamelCase=3 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
__A : Optional[Any] = hidden_size
__A : Dict = feat_extract_activation
__A : Any = list(_lowerCamelCase )
__A : Tuple = list(_lowerCamelCase )
__A : Optional[int] = list(_lowerCamelCase )
__A : List[Any] = conv_bias
__A : List[str] = num_conv_pos_embeddings
__A : Optional[int] = num_conv_pos_embedding_groups
__A : Tuple = conv_pos_kernel_size
__A : List[Any] = len(self.conv_dim )
__A : int = num_hidden_layers
__A : List[str] = intermediate_size
__A : Tuple = hidden_act
__A : List[Any] = num_attention_heads
__A : List[str] = hidden_dropout
__A : List[str] = attention_dropout
__A : Optional[Any] = activation_dropout
__A : Optional[Any] = feat_proj_dropout
__A : Tuple = final_dropout
__A : Union[str, Any] = layerdrop
__A : Optional[Any] = layer_norm_eps
__A : Dict = initializer_range
__A : Union[str, Any] = vocab_size
__A : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : Union[str, Any] = mask_time_min_masks
__A : Dict = mask_feature_prob
__A : List[str] = mask_feature_length
__A : Union[str, Any] = mask_feature_min_masks
# ctc loss
__A : Any = ctc_loss_reduction
__A : str = ctc_zero_infinity
# adapter
__A : Optional[Any] = add_adapter
__A : Union[str, Any] = adapter_kernel_size
__A : List[str] = adapter_stride
__A : Union[str, Any] = num_adapter_layers
__A : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__A : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__A : Any = list(_lowerCamelCase )
__A : Union[str, Any] = list(_lowerCamelCase )
__A : Optional[Any] = list(_lowerCamelCase )
__A : str = xvector_output_dim
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return math.prod(self.conv_stride )
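# A minimal usage sketch: the defaults mirror the base architecture, and the
# derived attributes follow from the conv stack defined above.
#     config = Data2VecAudioConfig()
#     config.num_feat_extract_layers  # 7, one per entry in conv_dim
#     config.inputs_to_logits_ratio   # 320 == 5 * 2**6, the product of conv_stride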
| 177 |

"""Tokenization classes for ALBERT (fast)."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 530 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

| 484 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""
    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
        eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
        mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text

| 484 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
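# Example: ids_tensor((2, 3), vocab_size=10) yields a 2x3 int32 array of ids in
# [0, 9]; random_attention_mask((2, 3)) additionally guarantees that the last
# position of every row is attended to (set to 1).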
@require_flax
class _A :
"""simple docstring"""
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
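# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original test suite): the
# invariant the tests above check, in isolation -- jit-compiling `generate`
# with JAX must leave its output unchanged. It reuses the tiny checkpoints
# from the test above and needs network access to the Hub to actually run.
# ---------------------------------------------------------------------------
def _example_jit_generate_matches_eager():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
    model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    input_ids = tokenizer("Hello world", return_tensors="np").input_ids
    # Greedy decoding is deterministic, so eager and jitted outputs must match.
    eager = model.generate(input_ids).sequences
    jitted = jit(model.generate)(input_ids).sequences
    assert eager.tolist() == jitted.tolist()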
| 68 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure the private _processor_class attribute is not incorrectly saved
            dict_as_saved = json.loads(image_processor.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_preprocessor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
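# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original tests): the minimal
# register-then-load round trip the registration tests above exercise.
# It assumes CustomImageProcessor can be instantiated with default arguments.
# ---------------------------------------------------------------------------
def _example_register_and_reload(tmp_dir):
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    image_processor = CustomImageProcessor()
    image_processor.save_pretrained(tmp_dir)
    # The auto API now resolves the custom class from the saved config.
    return AutoImageProcessor.from_pretrained(tmp_dir)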
| 688 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
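# ---------------------------------------------------------------------------
# Illustrative sketch (added; the class name is hypothetical): the smallest
# useful custom callback, mirroring the event-recording pattern above.
# ---------------------------------------------------------------------------
class ExamplePrintLossCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` carries whatever the Trainer just logged (e.g. {"loss": ...}).
        if logs:
            print(f"step {state.global_step}: {logs}")
# Usage sketch: Trainer(model, args, callbacks=[ExamplePrintLossCallback()], ...)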
| 140 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def _snake_case ( self : Optional[Any]):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def _snake_case ( self : Dict , **UpperCAmelCase : List[str]):
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase)
def _snake_case ( self : Optional[Any] , **UpperCAmelCase : Any):
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase)
def _snake_case ( self : Any , UpperCAmelCase : List[Any]):
SCREAMING_SNAKE_CASE_ :Tuple = "lower newer"
SCREAMING_SNAKE_CASE_ :Tuple = "lower newer"
return input_text, output_text
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Optional[int] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
SCREAMING_SNAKE_CASE_ :List[Any] = "lower newer"
SCREAMING_SNAKE_CASE_ :Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE_ :Tuple = tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ :Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , UpperCAmelCase)
def _snake_case ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ :List[str] = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = rust_tokenizer.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :int = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.encode(UpperCAmelCase , add_prefix_space=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = rust_tokenizer.encode(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
# Testing the unknown token
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ :Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase) , UpperCAmelCase)
def _snake_case ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any]):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _snake_case ( self : Tuple , UpperCAmelCase : Any=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
SCREAMING_SNAKE_CASE_ :Any = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase)
# Simple input
SCREAMING_SNAKE_CASE_ :List[str] = "This is a simple input"
SCREAMING_SNAKE_CASE_ :Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE_ :Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length")
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length")
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length")
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length")
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")
# Simple input
SCREAMING_SNAKE_CASE_ :List[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE_ :List[Any] = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE_ :Optional[int] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE_ :Optional[int] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE_ :Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer(UpperCAmelCase , padding="max_length" , max_length=30 , return_tensors="np")
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors="np")
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer(*UpperCAmelCase , padding="max_length" , max_length=60 , return_tensors="np")
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def _snake_case ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ :Tuple = "$$$"
SCREAMING_SNAKE_CASE_ :str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCAmelCase , add_bos_token=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = "This is a simple input"
SCREAMING_SNAKE_CASE_ :int = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE_ :int = tokenizer(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer(UpperCAmelCase)
self.assertEqual(out_s.input_ids[0] , UpperCAmelCase)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer.decode(out_s.input_ids)
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , UpperCAmelCase)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Optional[int] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
SCREAMING_SNAKE_CASE_ :List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
SCREAMING_SNAKE_CASE_ :str = "\nif len_a > len_b: result = a\nelse: result = b"
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.encode(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[Any] = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
SCREAMING_SNAKE_CASE_ :Any = tokenizer.decode(UpperCAmelCase , truncate_before_pattern=UpperCAmelCase)
self.assertEqual(UpperCAmelCase , UpperCAmelCase)
def _snake_case ( self : Dict):
pass
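# ---------------------------------------------------------------------------
# Illustrative sketch (added): how `truncate_before_pattern` is used when
# decoding real CodeGen completions. Wrapped in a function because it downloads
# the Salesforce checkpoint; the input string is reused from the slow test above.
# ---------------------------------------------------------------------------
def _example_truncate_completion() -> str:
    tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tokenizer.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
    # Decoding stops at the first comment marker or triple blank line, keeping only code.
    return tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])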
| 140 | 1 |
"""simple docstring"""
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
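

# ---------------------------------------------------------------------------
# Illustrative usage (added): a 5x5 grid whose 8-connected islands the class
# above counts. The expected result is worked out by hand.
# ---------------------------------------------------------------------------
def _example_count_islands() -> int:
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    return Graph(5, 5, grid).count_islands()  # -> 5
| 505 |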
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
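

# ---------------------------------------------------------------------------
# Worked check of the rolling-hash update used above (added): dropping the
# leading character and appending the next one must equal hashing the shifted
# window from scratch.
# ---------------------------------------------------------------------------
def _example_rolling_hash_update() -> None:
    def window_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    text = "abcd"
    p_len = 3
    modulus_power = pow(alphabet_size, p_len - 1, modulus)
    rolled = (
        (window_hash(text[:p_len]) - ord(text[0]) * modulus_power) * alphabet_size
        + ord(text[p_len])
    ) % modulus
    assert rolled == window_hash(text[1 : p_len + 1])
| 505 | 1 |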
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> YolosConfig:
UpperCamelCase__ : str = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCamelCase__ : Dict = 192
UpperCamelCase__ : Dict = 768
UpperCamelCase__ : Optional[Any] = 12
UpperCamelCase__ : List[Any] = 3
UpperCamelCase__ : Optional[int] = [800, 1333]
UpperCamelCase__ : Tuple = False
elif yolos_name == "yolos_s_dWr":
UpperCamelCase__ : int = 330
UpperCamelCase__ : Tuple = 14
UpperCamelCase__ : str = 6
UpperCamelCase__ : Optional[int] = 1320
elif "yolos_s" in yolos_name:
UpperCamelCase__ : Optional[int] = 384
UpperCamelCase__ : Any = 1536
UpperCamelCase__ : Union[str, Any] = 12
UpperCamelCase__ : int = 6
elif "yolos_b" in yolos_name:
UpperCamelCase__ : Dict = [800, 1344]
UpperCamelCase__ : List[Any] = 91
UpperCamelCase__ : str = '''huggingface/label-files'''
UpperCamelCase__ : Dict = '''coco-detection-id2label.json'''
UpperCamelCase__ : List[str] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ : str = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ : str = idalabel
UpperCamelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __UpperCAmelCase: dict , __UpperCAmelCase: YolosConfig , __UpperCAmelCase: bool = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : str = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCamelCase__ : Dict = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : Any = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : Optional[int] = in_proj_weight[-config.hidden_size :, :]
UpperCamelCase__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> str:
if "backbone" in name:
UpperCamelCase__ : str = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCamelCase__ : str = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCamelCase__ : str = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCamelCase__ : List[str] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCamelCase__ : List[Any] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase__ : Dict = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase__ : Optional[int] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase__ : int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase__ : str = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase__ : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCamelCase__ : List[Any] = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCamelCase__ : Optional[int] = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCamelCase__ : Tuple = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def lowerCAmelCase_ ( __UpperCAmelCase: dict , __UpperCAmelCase: YolosForObjectDetection ) -> dict:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ : Union[str, Any] = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
UpperCamelCase__ : Optional[int] = key.split('''.''' )
UpperCamelCase__ : Union[str, Any] = int(key_split[2] )
UpperCamelCase__ : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCamelCase__ : Dict = val[:dim, :]
UpperCamelCase__ : int = val[
dim : dim * 2, :
]
UpperCamelCase__ : Dict = val[-dim:, :]
else:
UpperCamelCase__ : List[Any] = val[:dim]
UpperCamelCase__ : List[str] = val[dim : dim * 2]
UpperCamelCase__ : int = val[-dim:]
else:
UpperCamelCase__ : Union[str, Any] = val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
UpperCamelCase__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : Dict = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: bool = False ) -> Optional[int]:
UpperCamelCase__ : List[str] = get_yolos_config(__UpperCAmelCase )
# load original state_dict
UpperCamelCase__ : List[Any] = torch.load(__UpperCAmelCase , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCamelCase__ : List[Any] = YolosForObjectDetection(__UpperCAmelCase )
model.eval()
UpperCamelCase__ : Dict = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCamelCase__ : List[Any] = 800 if yolos_name != '''yolos_ti''' else 512
UpperCamelCase__ : int = YolosImageProcessor(format='''coco_detection''' , size=__UpperCAmelCase )
UpperCamelCase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCamelCase__ : Dict = model(**__UpperCAmelCase )
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = outputs.logits, outputs.pred_boxes
UpperCamelCase__ ,UpperCamelCase__ : int = None, None
if yolos_name == "yolos_ti":
UpperCamelCase__ : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCamelCase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCamelCase__ : Optional[int] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCamelCase__ : Union[str, Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCamelCase__ : List[Any] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCamelCase__ : Any = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCamelCase__ : List[Any] = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCamelCase__ : List[Any] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
UpperCamelCase__ : Any = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCamelCase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCAmelCase , organization='''hustvl''' )
model.push_to_hub(__UpperCAmelCase , organization='''hustvl''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
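# ---------------------------------------------------------------------------
# Example invocation (added for illustration). The script name and checkpoint
# path are placeholders; supply a .pth file from the original YOLOS release:
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small
# ---------------------------------------------------------------------------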
| 369 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for sequence-to-sequence models, extending TrainingArguments with generation options."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serializes this instance, replacing nested `GenerationConfig` objects with their dict form for JSON support."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
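

# ---------------------------------------------------------------------------
# Illustrative usage (added): enabling generation-based evaluation. The field
# names come from the dataclass above; the output directory is a placeholder.
# ---------------------------------------------------------------------------
def _example_seq2seq_args() -> Seq2SeqTrainingArguments:
    return Seq2SeqTrainingArguments(
        output_dir="./seq2seq_out",
        predict_with_generate=True,
        generation_max_length=64,
        generation_num_beams=4,
    )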
| 369 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 216 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for each possible total, how many ways it can be rolled with `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice), rounded to 7 digits."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
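# ---------------------------------------------------------------------------
# Sanity check (added): with two six-sided dice there are 6**2 = 36 ordered
# outcomes and a total of 7 occurs in exactly 6 of them.
# ---------------------------------------------------------------------------
def _example_two_dice() -> None:
    freqs = total_frequency_distribution(sides_number=6, dice_number=2)
    assert sum(freqs) == 36
    assert freqs[7] == 6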
| 216 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__UpperCAmelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__UpperCAmelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__UpperCAmelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
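

# ---------------------------------------------------------------------------
# Illustrative direct use of the helpers above (added); the Metric class below
# wraps these for the `datasets` API.
# ---------------------------------------------------------------------------
def _example_mrpc_style_scores() -> dict:
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    return acc_and_f1(preds, labels)  # {'accuracy': 0.75, 'f1': 0.666...}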
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" ,)
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 703 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCAmelCase = 'examples/'
__UpperCAmelCase = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__UpperCAmelCase = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
__UpperCAmelCase = 'README.md'
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase__ : Dict = f.read()
UpperCAmelCase__ , UpperCAmelCase__ : Any = REPLACE_PATTERNS[pattern]
UpperCAmelCase__ : List[str] = replace.replace("""VERSION""" , __UpperCamelCase )
UpperCAmelCase__ : Tuple = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """🤗 Transformers currently provides the following architectures"""
UpperCAmelCase__ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase__ : str = f.readlines()
# Find the start of the list.
UpperCAmelCase__ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase__ : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
UpperCAmelCase__ : List[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
UpperCAmelCase__ : Optional[Any] = f.read()
UpperCAmelCase__ : int = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
UpperCAmelCase__ : List[str] = default_version.base_version
elif patch:
UpperCAmelCase__ : Any = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase__ : Optional[Any] = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase__ : Optional[int] = input(F"Which version are you releasing? [{default_version}]" )
if len(__UpperCamelCase ) == 0:
UpperCAmelCase__ : Union[str, Any] = default_version
print(F"Updating version to {version}." )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_version()
UpperCAmelCase__ : Any = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase__ : List[Any] = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase__ : Dict = input(F"Which version are we developing now? [{dev_version}]" )
if len(__UpperCamelCase ) == 0:
UpperCAmelCase__ : List[Any] = dev_version
print(F"Updating version to {version}." )
global_version_update(__UpperCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
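# ---------------------------------------------------------------------------
# Example invocations (added for illustration; the script path assumes the
# transformers repository layout, where this utility lives under utils/):
#
#   python utils/release.py                  # bump to the next minor release everywhere
#   python utils/release.py --patch          # cut a patch release from a released branch
#   python utils/release.py --post_release   # move back to a .dev0 version afterwards
# ---------------------------------------------------------------------------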
| 194 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
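# ---------------------------------------------------------------------------
# Example invocation (added for illustration; assumes this file is saved as
# rouge_cli.py and the text files exist). `fire.Fire` exposes the function
# directly, so positional arguments map to pred_path and tgt_path:
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json
# ---------------------------------------------------------------------------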
| 446 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the class name was obfuscated in the source; it is restored here after the
# matching processor in transformers (the EfficientFormer image processor).
class EfficientFormerImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor: optional resize, center crop, rescale and normalize.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])` or to the shortest edge."""
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
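
if __name__ == "__main__":
    # Smoke test, not part of the original module: run the processor on a dummy
    # channels-last uint8 image and check the output layout.
    dummy_image = np.zeros((224, 224, 3), dtype=np.uint8)
    processor = EfficientFormerImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)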
| 19 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1,
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
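
# Quick usage example for DirectedGraph (not in the original module):
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(0, 2)
#   g.add_pair(1, 3)
#   g.dfs()  # -> [0, 1, 3, 2]
#   g.bfs()  # -> [0, 1, 2, 3]
#   g.in_degree(3), g.out_degree(0)  # -> (1, 2)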
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1,
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 703 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 386 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge combinable terms; the terms that never merge are prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)

    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
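
# Worked example (not in the original module): the merge step used by check().
#
#   >>> compare_string("0010", "0110")   # strings differ in exactly one position
#   '0_10'
#   >>> compare_string("0110", "1101")   # strings differ in more than one position
#   False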
| 102 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
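
# Sketch of the pattern exercised above, outside the test harness (illustrative only):
#
#   trainer = Trainer(model, args, callbacks=[MyTestTrainerCallback])
#   trainer.train()
#   print(trainer.callback_handler.callbacks[-1].events)  # ordered event log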
| 608 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        max_decoder_length = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 240 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 240 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 554 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _snake_case ( snake_case ):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
class _snake_case ( nn.Module ):
UpperCamelCase__ = 42
UpperCamelCase__ = (16, 32, 96, 256)
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__magic_name__ : Any = []
for i in range(len(self.block_out_channels ) - 1 ):
__magic_name__ : Any = self.block_out_channels[i]
__magic_name__ : Optional[int] = self.block_out_channels[i + 1]
__magic_name__ : List[str] = nn.Conv(
_a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_a )
__magic_name__ : Optional[Any] = nn.Conv(
_a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_a )
__magic_name__ : Union[str, Any] = blocks
__magic_name__ : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _a ):
__magic_name__ : List[Any] = self.conv_in(_a )
__magic_name__ : Dict = nn.silu(_a )
for block in self.blocks:
__magic_name__ : Dict = block(_a )
__magic_name__ : int = nn.silu(_a )
__magic_name__ : str = self.conv_out(_a )
return embedding
@flax_register_to_config
class _snake_case ( nn.Module , snake_case , snake_case ):
UpperCamelCase__ = 32
UpperCamelCase__ = 4
UpperCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ = False
UpperCamelCase__ = (320, 640, 1280, 1280)
UpperCamelCase__ = 2
UpperCamelCase__ = 8
UpperCamelCase__ = None
UpperCamelCase__ = 1280
UpperCamelCase__ = 0.0
UpperCamelCase__ = False
UpperCamelCase__ = jnp.floataa
UpperCamelCase__ = True
UpperCamelCase__ = 0
UpperCamelCase__ = "rgb"
UpperCamelCase__ = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE ( self , _a ):
# init input tensors
__magic_name__ : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
__magic_name__ : Dict = jnp.zeros(_a , dtype=jnp.floataa )
__magic_name__ : int = jnp.ones((1,) , dtype=jnp.intaa )
__magic_name__ : Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__magic_name__ : List[str] = (1, 3, self.sample_size * 8, self.sample_size * 8)
__magic_name__ : Optional[int] = jnp.zeros(_a , dtype=jnp.floataa )
__magic_name__ , __magic_name__ : Dict = jax.random.split(_a )
__magic_name__ : str = {"params": params_rng, "dropout": dropout_rng}
return self.init(_a , _a , _a , _a , _a )["params"]
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.block_out_channels
__magic_name__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ : str = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ : Tuple = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ : str = FlaxTimestepEmbedding(_a , dtype=self.dtype )
__magic_name__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__magic_name__ : Tuple = self.only_cross_attention
if isinstance(_a , _a ):
__magic_name__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ : List[Any] = []
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = block_out_channels[0]
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ : Optional[int] = output_channel
__magic_name__ : int = block_out_channels[i]
__magic_name__ : List[str] = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__magic_name__ : List[Any] = FlaxDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_a )
for _ in range(self.layers_per_block ):
__magic_name__ : List[Any] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
if not is_final_block:
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
__magic_name__ : Any = down_blocks
__magic_name__ : Any = controlnet_down_blocks
# mid
__magic_name__ : Optional[int] = block_out_channels[-1]
__magic_name__ : List[str] = FlaxUNetMidBlockaDCrossAttn(
in_channels=_a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__magic_name__ : Optional[int] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _a , _a , _a , _a , _a = 1.0 , _a = True , _a = False , ):
__magic_name__ : List[str] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__magic_name__ : int = jnp.flip(_a , axis=1 )
# 1. time
if not isinstance(_a , jnp.ndarray ):
__magic_name__ : Dict = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_a , jnp.ndarray ) and len(timesteps.shape ) == 0:
__magic_name__ : Optional[int] = timesteps.astype(dtype=jnp.floataa )
__magic_name__ : Dict = jnp.expand_dims(_a , 0 )
__magic_name__ : Any = self.time_proj(_a )
__magic_name__ : int = self.time_embedding(_a )
# 2. pre-process
__magic_name__ : Dict = jnp.transpose(_a , (0, 2, 3, 1) )
__magic_name__ : Any = self.conv_in(_a )
__magic_name__ : List[str] = jnp.transpose(_a , (0, 2, 3, 1) )
__magic_name__ : Tuple = self.controlnet_cond_embedding(_a )
sample += controlnet_cond
# 3. down
__magic_name__ : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_a , _a ):
__magic_name__ , __magic_name__ : List[str] = down_block(_a , _a , _a , deterministic=not train )
else:
__magic_name__ , __magic_name__ : List[Any] = down_block(_a , _a , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__magic_name__ : int = self.mid_block(_a , _a , _a , deterministic=not train )
        # 5. controlnet blocks
__magic_name__ : Any = ()
for down_block_res_sample, controlnet_block in zip(_a , self.controlnet_down_blocks ):
__magic_name__ : Dict = controlnet_block(_a )
controlnet_down_block_res_samples += (down_block_res_sample,)
__magic_name__ : Optional[Any] = controlnet_down_block_res_samples
__magic_name__ : int = self.controlnet_mid_block(_a )
# 6. scaling
__magic_name__ : Dict = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_a , mid_block_res_sample=_a )
| 124 | 0 |
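The 1x1 projection convolutions registered above use `nn.initializers.zeros_init()` for both kernel and bias, so a freshly initialized ControlNet contributes exactly zero to the base UNet's residuals until training moves the weights. A minimal runnable sketch of that property (shapes and names are illustrative, not taken from the snippet):

import jax
import jax.numpy as jnp
import flax.linen as nn

# A zero-initialized 1x1 projection, mirroring the controlnet block convs above.
conv = nn.Conv(
    features=4,
    kernel_size=(1, 1),
    padding="VALID",
    kernel_init=nn.initializers.zeros_init(),
    bias_init=nn.initializers.zeros_init(),
)
x = jnp.ones((1, 8, 8, 4))  # NHWC layout, as used by the Flax blocks
params = conv.init(jax.random.PRNGKey(0), x)
y = conv.apply(params, x)
assert bool(jnp.all(y == 0))  # zero kernel and bias -> all-zero output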
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
A__ : List[str] = StableDiffusionXLImgaImgPipeline
A__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A__ : Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
A__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A__ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
A__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self ):
torch.manual_seed(0 )
lowerCamelCase__ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase__ =EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowerCamelCase__ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
lowerCamelCase__ =CLIPTextModel(__a )
lowerCamelCase__ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__a )
lowerCamelCase__ =CLIPTextModelWithProjection(__a )
lowerCamelCase__ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__a )
lowerCamelCase__ ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _a ( self , _lowerCamelCase , _lowerCamelCase=0 ):
lowerCamelCase__ =floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase__ =image / 2 + 0.5
if str(__a ).startswith("mps" ):
lowerCamelCase__ =torch.manual_seed(__a )
else:
lowerCamelCase__ =torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase__ ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.7_5,
}
return inputs
def _a ( self ):
lowerCamelCase__ ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase__ =sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__ =self.get_dummy_inputs(__a )
lowerCamelCase__ =sd_pipe(**__a ).images
lowerCamelCase__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ =np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _a ( self ):
pass
def _a ( self ):
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase__ =sd_pipe.to(__a )
lowerCamelCase__ =sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase__ =self.get_dummy_inputs(__a )
lowerCamelCase__ =3 * ['this is a negative prompt']
lowerCamelCase__ =negative_prompt
lowerCamelCase__ =3 * [inputs['prompt']]
lowerCamelCase__ =sd_pipe(**__a )
lowerCamelCase__ =output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase__ =self.get_dummy_inputs(__a )
lowerCamelCase__ =3 * ['this is a negative prompt']
lowerCamelCase__ =3 * [inputs.pop("prompt" )]
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase__ =sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase__ =output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ):
lowerCamelCase__ =torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase__ =np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ =torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase__ ={
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _a ( self ):
lowerCamelCase__ =DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__ =self.get_inputs(__a )
lowerCamelCase__ =pipe(**__a ).images
lowerCamelCase__ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ =np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
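The assertions above compare only a 3x3 corner slice of the last channel against a stored reference instead of the full image; a self-contained toy of the slicing idiom:

import numpy as np

rng = np.random.RandomState(0)
image = rng.rand(1, 32, 32, 3)        # stand-in for a pipeline output batch
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch, last channel
expected_slice = image_slice.copy()
assert image_slice.shape == (3, 3)
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-4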
| 717 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a =logging.get_logger(__name__)
class __UpperCAmelCase ( __lowerCAmelCase ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 132 | 0 |
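The feature-extractor class above is a thin deprecation shim: it warns and otherwise behaves exactly like its replacement. The same pattern in a self-contained sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldProcessor(NewProcessor):  # hypothetical deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(scale=0.5)  # emits FutureWarning, behaves like NewProcessor
assert proc.scale == 0.5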
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Any = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe(num_inference_steps=2 , generator=a , output_type="numpy" , return_dict=a )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel.from_pretrained(a )
SCREAMING_SNAKE_CASE : Any = KarrasVeScheduler()
SCREAMING_SNAKE_CASE : Optional[Any] = KarrasVePipeline(unet=a , scheduler=a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(num_inference_steps=20 , generator=a , output_type="numpy" ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 25 |
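Both generations in the test reseed with `torch.manual_seed(0)` first, so the dict-returning and tuple-returning calls consume identical noise. The reseeding idiom in isolation:

import torch

torch.manual_seed(0)
a = torch.randn(4)
torch.manual_seed(0)      # reseed so the second draw repeats the first
b = torch.randn(4)
assert torch.equal(a, b)  # identical noise -> identical pipeline outputs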
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __a ( _snake_case ):
__UpperCamelCase : str = 'esm'
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : str=None ,lowerCamelCase : Any=None ,lowerCamelCase : Union[str, Any]=768 ,lowerCamelCase : Tuple=12 ,lowerCamelCase : int=12 ,lowerCamelCase : Optional[int]=3072 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : Any=1026 ,lowerCamelCase : str=0.02 ,lowerCamelCase : int=1E-1_2 ,lowerCamelCase : Union[str, Any]="absolute" ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : str=None ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : int=False ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=None ,**lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,mask_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = emb_layer_norm_before
__SCREAMING_SNAKE_CASE = token_dropout
__SCREAMING_SNAKE_CASE = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__SCREAMING_SNAKE_CASE = EsmFoldConfig()
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = EsmFoldConfig(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__SCREAMING_SNAKE_CASE = get_default_vocab_list()
else:
__SCREAMING_SNAKE_CASE = vocab_list
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().to_dict()
if isinstance(self.esmfold_config ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = self.esmfold_config.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : str = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : float = 0
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : int = 128
__UpperCamelCase : "TrunkConfig" = None
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.trunk is None:
__SCREAMING_SNAKE_CASE = TrunkConfig()
elif isinstance(self.trunk ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.trunk.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 48
__UpperCamelCase : int = 1024
__UpperCamelCase : int = 128
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : float = 0
__UpperCamelCase : float = 0
__UpperCamelCase : bool = False
__UpperCamelCase : int = 4
__UpperCamelCase : Optional[int] = 128
__UpperCamelCase : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.structure_module is None:
__SCREAMING_SNAKE_CASE = StructureModuleConfig()
elif isinstance(self.structure_module ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
__SCREAMING_SNAKE_CASE = self.sequence_state_dim // self.sequence_head_width
__SCREAMING_SNAKE_CASE = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.structure_module.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 384
__UpperCamelCase : int = 128
__UpperCamelCase : int = 16
__UpperCamelCase : int = 128
__UpperCamelCase : int = 12
__UpperCamelCase : int = 4
__UpperCamelCase : int = 8
__UpperCamelCase : float = 0.1
__UpperCamelCase : int = 8
__UpperCamelCase : int = 1
__UpperCamelCase : int = 2
__UpperCamelCase : int = 7
__UpperCamelCase : int = 10
__UpperCamelCase : float = 1E-8
__UpperCamelCase : float = 1E5
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return asdict(self )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 109 | 0 |
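The `TrunkConfig` validation derives head counts by integer division, so each state dim must be an exact multiple of its head width. Worked numbers using the defaults above:

# Defaults from TrunkConfig above: sequence_state_dim=1024, sequence_head_width=32,
# pairwise_state_dim=128, pairwise_head_width=32.
sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32

sequence_num_heads = sequence_state_dim // sequence_head_width   # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width   # 4 heads
assert sequence_num_heads * sequence_head_width == sequence_state_dim
assert pairwise_num_heads * pairwise_head_width == pairwise_state_dim
assert pairwise_state_dim % 2 == 0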
from functools import lru_cache
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 2
lowercase__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_UpperCamelCase )
if n > 1:
factors.add(_UpperCamelCase )
return factors
@lru_cache
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return len(unique_prime_factors(_UpperCamelCase ) )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return len(set(_UpperCamelCase ) ) in (0, 1)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 2
while True:
# Increment each value of a generated range
lowercase__ = [base + i for i in range(_UpperCamelCase )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
        lowercase__ = [upf_len(x ) for x in group]
checker.append(_UpperCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_UpperCamelCase ):
return group
# Increment our base variable by 1
base += 1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 4 ):
lowercase__ = run(_UpperCamelCase )
return results[0] if len(_UpperCamelCase ) else None
if __name__ == "__main__":
print(solution())
| 715 |
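This snippet is the consecutive-distinct-prime-factors search (Project Euler 47): `run(n)` returns the first n consecutive integers that each have exactly n distinct prime factors. Since the renaming above collapses all four helpers to a single identifier, the sketch below restates the factorization helper under its assumed descriptive name and lists hand-checkable results:

# Restated compactly under an assumed descriptive name.
def unique_prime_factors(n):
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2^2 * 7 * 23
# First qualifying runs: n=2 -> [14, 15]; n=3 -> [644, 645, 646];
# n=4 -> starts at 134043 (the Project Euler 47 answer).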
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase ( ):
lowercase__ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ )
lowercase__ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
# Let's go
lowercase__ = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 37 | 0 |
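The CLI above registers each subcommand through `add_subparsers` and dispatches via `args.func(args)`; each `*_command_parser` is expected to call `set_defaults(func=...)` on its subparser. A self-contained sketch of the dispatch pattern with a hypothetical command:

from argparse import ArgumentParser

def hello_command(args):
    print(f"hello, {args.name}")

def make_parser():
    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    sub = parser.add_subparsers(help="demo command helpers")
    hello = sub.add_parser("hello")
    hello.add_argument("--name", default="world")
    hello.set_defaults(func=hello_command)   # same dispatch trick as above
    return parser

args = make_parser().parse_args(["hello", "--name", "accelerate"])
if not hasattr(args, "func"):
    raise SystemExit(1)
args.func(args)   # prints: hello, accelerate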
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
        raise ValueError('The specified number of rows and columns is not correct.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 |
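`image_grid` pastes `rows * cols` images row-major onto one canvas, which is why it validates the count up front. A tiny runnable equivalent with 2x2 placeholder tiles:

from PIL import Image

tiles = [Image.new("RGB", (2, 2), color=(i * 60, 0, 0)) for i in range(4)]
# Paste 4 tiles of size 2x2 into a 2x2 grid -> a 4x4 canvas, row-major order.
w, h = tiles[0].size
grid = Image.new("RGB", (2 * w, 2 * h))
for i, img in enumerate(tiles):
    grid.paste(img, box=(i % 2 * w, i // 2 * h))
assert grid.size == (4, 4)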
from __future__ import annotations
from typing import TypedDict
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowerCamelCase ) )]
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_SCREAMING_SNAKE_CASE : Any = all_rotations(__lowerCamelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_SCREAMING_SNAKE_CASE : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowerCamelCase ),
}
return response
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = int(__lowerCamelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowerCamelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_SCREAMING_SNAKE_CASE : Optional[int] = [""] * len(__lowerCamelCase )
for _ in range(len(__lowerCamelCase ) ):
for i in range(len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE : List[str] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCamelCase__ ='Provide a string that I will generate its BWT transform: '
UpperCamelCase__ =input(entry_msg).strip()
UpperCamelCase__ =bwt_transform(s)
print(
f"Burrows Wheeler transform for string '{s}' results "
f"in '{result['bwt_string']}'"
)
UpperCamelCase__ =reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
f"we get original string '{original_string}'"
) | 249 | 0 |
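A worked example for the transform above, using "banana": its six rotations sort to abanan, anaban, ananab, banana, nabana, nanaba, whose last column spells "nnbaaa" with the original string at sorted index 3. Assuming the definitions are restored to the descriptive names used in the demo block (`bwt_transform`, `reverse_bwt`):

result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"    # last column of sorted rotations
assert result["idx_original_string"] == 3  # "banana" sits 4th in sorted order
assert reverse_bwt("nnbaaa", 3) == "banana"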
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self : Tuple , __lowerCamelCase : Union[str, Any] ):
super().__init__()
UpperCamelCase :Dict = model
UpperCamelCase :Optional[Any] = 2
UpperCamelCase :Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _A ( self : Any ):
pass
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : str ) -> Dict:
"""simple docstring"""
UpperCamelCase :Optional[Any] = LongformerModel.from_pretrained(__magic_name__ )
UpperCamelCase :Any = LightningModel(__magic_name__ )
UpperCamelCase :Any = torch.load(__magic_name__ , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
UpperCamelCase :Optional[int] = LongformerForQuestionAnswering.from_pretrained(__magic_name__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__magic_name__ )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : str = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 590 |
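The conversion script moves weights with `load_state_dict(...state_dict())`, which works whenever parameter names and shapes match between the source and destination modules. The transfer pattern in isolation:

import torch
from torch import nn

src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())   # same keys/shapes -> direct transfer
x = torch.randn(1, 4)
assert torch.equal(src(x), dst(x))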
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : jnp.ndarray
snake_case__ : jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
snake_case__ : int
snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
snake_case__ : jnp.dtype = jnp.floataa
def _A ( self : Any ):
UpperCamelCase :Union[str, Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase :List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCamelCase :Optional[Any] = self.block_out_channels[i]
UpperCamelCase :List[Any] = self.block_out_channels[i + 1]
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :Tuple = blocks
UpperCamelCase :Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Dict , __lowerCamelCase : Dict ):
UpperCamelCase :Tuple = self.conv_in(__lowerCamelCase )
UpperCamelCase :Optional[Any] = nn.silu(__lowerCamelCase )
for block in self.blocks:
UpperCamelCase :Tuple = block(__lowerCamelCase )
UpperCamelCase :List[str] = nn.silu(__lowerCamelCase )
UpperCamelCase :Dict = self.conv_out(__lowerCamelCase )
return embedding
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , _a , _a ):
snake_case__ : int = 3_2
snake_case__ : int = 4
snake_case__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case__ : Union[bool, Tuple[bool]] = False
snake_case__ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
snake_case__ : int = 2
snake_case__ : Union[int, Tuple[int]] = 8
snake_case__ : Optional[Union[int, Tuple[int]]] = None
snake_case__ : int = 1_2_8_0
snake_case__ : float = 0.0
snake_case__ : bool = False
snake_case__ : jnp.dtype = jnp.floataa
snake_case__ : bool = True
snake_case__ : int = 0
snake_case__ : str = "rgb"
snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def _A ( self : int , __lowerCamelCase : jax.random.KeyArray ):
# init input tensors
UpperCamelCase :int = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase :Union[str, Any] = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase :int = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase :Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCamelCase :Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCamelCase :Tuple = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase , UpperCamelCase :int = jax.random.split(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
def _A ( self : int ):
UpperCamelCase :Dict = self.block_out_channels
UpperCamelCase :Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase :List[Any] = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase :Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase :Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase :Tuple = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
UpperCamelCase :List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCamelCase :Union[str, Any] = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase :int = []
UpperCamelCase :str = []
UpperCamelCase :str = block_out_channels[0]
UpperCamelCase :Optional[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase :List[str] = output_channel
UpperCamelCase :Optional[Any] = block_out_channels[i]
UpperCamelCase :Tuple = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase :List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCamelCase :List[Any] = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
for _ in range(self.layers_per_block ):
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
if not is_final_block:
UpperCamelCase :str = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
UpperCamelCase :Optional[Any] = down_blocks
UpperCamelCase :Optional[Any] = controlnet_down_blocks
# mid
UpperCamelCase :str = block_out_channels[-1]
UpperCamelCase :Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ):
UpperCamelCase :Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCamelCase :List[Any] = jnp.flip(__lowerCamelCase , axis=1 )
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
UpperCamelCase :Any = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase :Any = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase :Optional[Any] = jnp.expand_dims(__lowerCamelCase , 0 )
UpperCamelCase :Optional[Any] = self.time_proj(__lowerCamelCase )
UpperCamelCase :Any = self.time_embedding(__lowerCamelCase )
# 2. pre-process
UpperCamelCase :int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Dict = self.conv_in(__lowerCamelCase )
UpperCamelCase :Any = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Optional[int] = self.controlnet_cond_embedding(__lowerCamelCase )
sample += controlnet_cond
# 3. down
UpperCamelCase :int = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase , UpperCamelCase :Optional[Any] = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
UpperCamelCase , UpperCamelCase :Union[str, Any] = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCamelCase :List[str] = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. controlnet blocks
UpperCamelCase :str = ()
for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
UpperCamelCase :Any = controlnet_block(__lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase :Optional[Any] = controlnet_down_block_res_samples
UpperCamelCase :str = self.controlnet_mid_block(__lowerCamelCase )
# 6. scaling
UpperCamelCase :str = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
| 590 | 1 |
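Step 6 of `__call__` above multiplies every residual by `conditioning_scale`, so a scale of 0 silences the ControlNet entirely while 1.0 applies it at full strength. The scaling step in isolation (shapes illustrative):

import jax.numpy as jnp

down_block_res_samples = (jnp.ones((1, 2, 2, 4)), jnp.ones((1, 1, 1, 8)))
mid_block_res_sample = jnp.ones((1, 1, 1, 8))
conditioning_scale = 0.5

scaled = [s * conditioning_scale for s in down_block_res_samples]  # step 6 above
mid_block_res_sample = mid_block_res_sample * conditioning_scale
assert float(scaled[0].max()) == 0.5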
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( snake_case = 10**9):
__snake_case = 1
__snake_case = 2
__snake_case = 0
__snake_case = 0
__snake_case = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__snake_case = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""") | 564 | """simple docstring"""
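The snippet before the marker is the almost-equilateral-triangle sum (Project Euler 94): triangles with sides (a, a, a±1) and integral area, summed over perimeters up to 10^9. The Pell-like recurrence generates the qualifying perimeters 16, 50, 196, ... directly; hand-checkable first cases, assuming the function keeps the name `solution` used in the demo line:

# (5, 5, 6):    height 4,  area 12,   perimeter 16
# (17, 17, 16): height 15, area 120,  perimeter 50
# (65, 65, 66): height 56, area 1848, perimeter 196
assert solution(16) == 16
assert solution(50) == 16 + 50
assert solution(196) == 16 + 50 + 196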
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : List[Any] = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["ConvNextFeatureExtractor"]
__lowercase : Union[str, Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure) | 564 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Dict = logging.get_logger(__name__)
class UpperCamelCase__ ( __lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ['pixel_values']
def __init__( self , snake_case = True , snake_case = 3_2 , snake_case=PILImageResampling.BILINEAR , snake_case = True , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = do_resize
UpperCAmelCase : Any = do_rescale
UpperCAmelCase : Tuple = size_divisor
UpperCAmelCase : List[str] = resample
super().__init__(**__A )
def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[int] = get_image_size(__A )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase : Optional[int] = height // size_divisor * size_divisor
UpperCAmelCase : Optional[int] = width // size_divisor * size_divisor
UpperCAmelCase : Tuple = resize(__A , (new_h, new_w) , resample=__A , data_format=__A , **__A )
return image
def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case ):
'''simple docstring'''
return rescale(image=__A , scale=__A , data_format=__A , **__A )
def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case=None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Union[str, Any] = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
UpperCAmelCase : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
UpperCAmelCase : Optional[Any] = [to_numpy_array(__A ) for img in images]
if do_resize:
UpperCAmelCase : int = [self.resize(__A , size_divisor=__A , resample=__A ) for image in images]
if do_rescale:
UpperCAmelCase : Optional[Any] = [self.rescale(__A , scale=1 / 2_5_5 ) for image in images]
UpperCAmelCase : List[str] = [to_channel_dimension_format(__A , __A ) for image in images]
UpperCAmelCase : Optional[int] = {"pixel_values": images}
return BatchFeature(data=__A , tensor_type=__A )
| 714 |
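The resize step above rounds height and width down to the nearest multiple of `size_divisor` before interpolating. Worked numbers as a runnable sketch:

size_divisor = 32
height, width = 513, 479
new_h = height // size_divisor * size_divisor   # 512
new_w = width // size_divisor * size_divisor    # 448
assert (new_h, new_w) == (512, 448)
assert new_h % size_divisor == 0 and new_w % size_divisor == 0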
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 609 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a : Tuple = '''▁'''
__a : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase( snake_case_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = BigBirdTokenizer
a : List[str] = BigBirdTokenizerFast
a : str = True
a : int = True
def __a ( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
lowercase__ : List[Any] = self.tokenizer_class(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = "<s>"
lowercase__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(lowerCamelCase ) , 1004 )
def __a ( self ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[Any] = "I was born in 92000, and this is falsé."
lowercase__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase )
lowercase__ : Tuple = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowercase__ : str = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
lowercase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
lowercase__ : int = tokenizer.encode(lowerCamelCase )
lowercase__ : Any = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = BigBirdTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
lowercase__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , )
lowercase__ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase__ : int = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = "Hello World!"
lowercase__ : Union[str, Any] = [65, 18536, 2260, 101, 66]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : Any = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowercase__ : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[Any]:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase__ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase__ : Any = " ".join(lowerCamelCase )
lowercase__ : Dict = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
lowercase__ : Dict = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
lowercase__ : str = BigBirdConfig(attention_type="original_full" )
lowercase__ : Union[str, Any] = BigBirdModel(lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : str = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowercase__ : List[Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : str = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , ) | 397 |
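SentencePiece marks word starts with the U+2581 character ("▁", bound to SPIECE_UNDERLINE above), so detokenization is just joining pieces and mapping that marker back to a space. A small illustration using the expectations from the first tokenization test (values copied from the test, not re-derived):

pieces = ["▁This", "▁is", "▁a", "▁t", "est"]
ids = [285, 46, 10, 170, 382]
assert len(pieces) == len(ids)
assert "".join(pieces).replace("▁", " ").strip() == "This is a test"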
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
__a : List[Any] = logging.getLogger(__name__)
__a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
__a : List[str] = {'''facebook/bart-base''': BartTokenizer}
def snake_case_ ( ) -> List[Any]:
lowercase__ : Any = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" ,type=SCREAMING_SNAKE_CASE_ ,default=5 ,help="The maximum total input sequence length after tokenization." ,)
parser.add_argument(
"--num_beams" ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) ,)
parser.add_argument(
"--model_name_or_path" ,type=SCREAMING_SNAKE_CASE_ ,help="Path to pretrained model or model identifier from huggingface.co/models." ,required=SCREAMING_SNAKE_CASE_ ,)
parser.add_argument(
"--config_name" ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,help="Pretrained config name or path if not the same as model_name" ,)
parser.add_argument(
"--device" ,type=SCREAMING_SNAKE_CASE_ ,default="cpu" ,help="Device where the model will be run" ,)
parser.add_argument("--output_file_path" ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,help="Where to store the final ONNX file." )
lowercase__ : str = parser.parse_args()
return args
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_="cpu" ) -> str:
lowercase__ : int = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ )
if model_name in ["facebook/bart-base"]:
lowercase__ : Any = 0
lowercase__ : List[str] = None
lowercase__ : int = 0
return huggingface_model, tokenizer
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
model.eval()
lowercase__ : List[Any] = None
lowercase__ : Optional[Any] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE_ ) )
with torch.no_grad():
lowercase__ : str = "My friends are cool but they eat too many carbs."
lowercase__ : Optional[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=10_24 ,return_tensors="pt" ).to(model.device )
lowercase__ : Optional[int] = model.generate(
inputs["input_ids"] ,attention_mask=inputs["attention_mask"] ,num_beams=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,early_stopping=SCREAMING_SNAKE_CASE_ ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
SCREAMING_SNAKE_CASE_ ,(
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,SCREAMING_SNAKE_CASE_ ,opset_version=14 ,input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] ,output_names=["output_ids"] ,dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} ,example_outputs=SCREAMING_SNAKE_CASE_ ,)
logger.info("Model exported to {}".format(SCREAMING_SNAKE_CASE_ ) )
lowercase__ : Tuple = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
logger.info("Deduplicated and optimized model written to {}".format(SCREAMING_SNAKE_CASE_ ) )
lowercase__ : int = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE_ )
lowercase__ : Tuple = ort_sess.run(
SCREAMING_SNAKE_CASE_ ,{
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(SCREAMING_SNAKE_CASE_ ),
"max_length": np.array(SCREAMING_SNAKE_CASE_ ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1E-3 ,atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def snake_case_ ( ) -> List[Any]:
lowercase__ : Optional[Any] = parse_args()
lowercase__ : List[Any] = 5
lowercase__ : List[Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
lowercase__ : Optional[int] = torch.device(args.device )
lowercase__ , lowercase__ : Any = load_model_tokenizer(args.model_name_or_path ,SCREAMING_SNAKE_CASE_ )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(SCREAMING_SNAKE_CASE_ )
if args.max_length:
lowercase__ : Tuple = args.max_length
if args.num_beams:
lowercase__ : str = args.num_beams
if args.output_file_path:
lowercase__ : str = args.output_file_path
else:
lowercase__ : Tuple = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    main()
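# Illustrative invocation of this exporter (the script name and paths are
# assumptions, not taken from this file; the flags match parse_args above):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart_beam_search.onnx \
#       --device cpu
#
# Because beam search is scripted into the exported graph, onnxruntime can
# produce summary ids directly, with no Python-side decoding loop.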
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , unittest.TestCase ):
__a =LongformerTokenizer
__a =True
__a =LongformerTokenizerFast
__a =True
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__a = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a = {'unk_token': '<unk>'}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase ) )
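        # With this toy vocab and merge list, "lower newer" splits into
        # ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]; the tokenization
        # tests below depend on exactly this segmentation.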
def __UpperCamelCase ( self , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __UpperCamelCase ( self , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
__a = 'lower newer'
__a = 'lower newer'
return input_text, output_text
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a = 'lower newer'
__a = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a = tokenizer.tokenize(lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__a = tokens + [tokenizer.unk_token]
__a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCamelCase ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCamelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
__a = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase )
__a = tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__a = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = self.get_tokenizer()
__a = 'Encode this sequence.'
__a = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
# Testing spaces after special tokens
__a = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase )} ) # mask token has a left space
__a = tokenizer.convert_tokens_to_ids(lowerCamelCase )
__a = 'Encode <mask> sequence'
__a = 'Encode <mask>sequence'
__a = tokenizer.encode(lowerCamelCase )
__a = encoded.index(lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase )
__a = encoded.index(lowerCamelCase )
__a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
pass
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = 'A, <mask> AllenNLP sentence.'
__a = tokenizer_r.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
__a = tokenizer_p.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__a = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__a = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , lowerCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , lowerCamelCase )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__a = F"""{text_of_1_token} {text_of_1_token}"""
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ) + 1, 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
'''simple docstring'''
__UpperCamelCase : Tuple = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
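# The try/except pattern above (and repeated below for each optional backend)
# probes for an extra dependency and falls back to dummy objects when it is
# missing; the dummies raise an informative ImportError only when actually
# used, so the top-level package import never fails outright.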
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
    from .pipelines import MidiProcessor
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
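    # Illustrative (derived from the two methods above, not a recorded output):
    # XLNet appends rather than prepends its special tokens, so a single
    # 3-token sequence becomes "A A A <sep> <cls>" with token_type_ids
    # [0, 0, 0, 0, 2] -- segment 0 for the sequence and <sep>, segment 2 for
    # the trailing <cls>.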
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
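# Worked example (hand-computed sketch of what the functions above return):
#
#   z_function("abracadabra") -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
#       z_result[0] is left at 0 by convention; z_result[7] == 4 because the
#       suffix "abra" matches a length-4 prefix of the string.
#   find_pattern("abr", "abracadabra") -> 2
#       "abr" occurs at indices 0 and 7 of the text, and each occurrence shows
#       up as a z-value >= len(pattern) in the concatenated string.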
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __magic_name__ :
UpperCamelCase : Tuple = XGLMConfig
UpperCamelCase : List[Any] = {}
UpperCamelCase : Union[str, Any] = "gelu"
def __init__( self , __magic_name__ , __magic_name__=1_4 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=9_9 , __magic_name__=3_2 , __magic_name__=2 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=0.02 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = d_model
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = ffn_dim
_lowerCAmelCase = activation_function
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = initializer_range
_lowerCAmelCase = None
_lowerCAmelCase = 0
_lowerCAmelCase = 2
_lowerCAmelCase = 1
def _lowerCamelCase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCamelCase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__magic_name__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__magic_name__ , )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Tuple = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase : str = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase : int = False
UpperCamelCase : Any = False
UpperCamelCase : str = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFXGLMModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , n_embd=3_7 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFXGLMModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self , __magic_name__=True ):
"""simple docstring"""
_lowerCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCAmelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
_lowerCAmelCase = model.generate(__magic_name__ , do_sample=__magic_name__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_lowerCAmelCase = tokenizer('Today is a nice day and' , return_tensors='tf' )
_lowerCAmelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_lowerCAmelCase = model.generate(__magic_name__ , do_sample=__magic_name__ , seed=[7, 0] )
_lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
_lowerCAmelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(__magic_name__ , __magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCAmelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCAmelCase = 'left'
# use different length sentences to test batching
_lowerCAmelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_lowerCAmelCase = tokenizer(__magic_name__ , return_tensors='tf' , padding=__magic_name__ )
_lowerCAmelCase = inputs['input_ids']
_lowerCAmelCase = model.generate(input_ids=__magic_name__ , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
_lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCAmelCase = model.generate(input_ids=__magic_name__ , max_new_tokens=1_2 )
_lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCAmelCase = model.generate(input_ids=__magic_name__ , max_new_tokens=1_2 )
_lowerCAmelCase = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
_lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
_lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
_lowerCAmelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
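        # Decoder-only models such as XGLM must be left-padded for batched
        # generation: with right padding the model would condition on pad
        # tokens when predicting the continuation, which is why this test
        # checks that padded and unpadded runs decode to the same sentences.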
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Dict = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCamelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCAmelCase = CLIPTextModel(__magic_name__ )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(__magic_name__ )
else:
_lowerCAmelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_lowerCAmelCase = 2
_lowerCAmelCase = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , )
_lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(__magic_name__ ) ).convert('RGB' ).resize((6_4, 6_4) )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : str = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
def init_weights(__magic_name__ ):
if isinstance(__magic_name__ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCAmelCase = CLIPTextModel(__magic_name__ )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] )
_lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(__magic_name__ )
else:
_lowerCAmelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_lowerCAmelCase = 2
_lowerCAmelCase = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
]
_lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(__magic_name__ ) ).convert('RGB' ).resize((6_4, 6_4) )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
_lowerCAmelCase = 10.0
_lowerCAmelCase = 4
_lowerCAmelCase = self.get_dummy_inputs(__magic_name__ )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__magic_name__ )[0]
_lowerCAmelCase = self.get_dummy_inputs(__magic_name__ )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__magic_name__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_lowerCAmelCase = self.get_dummy_inputs(__magic_name__ )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__magic_name__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_lowerCAmelCase = self.get_dummy_inputs(__magic_name__ )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__magic_name__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__magic_name__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
_lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=__magic_name__ , controlnet=__magic_name__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = 'evil space-punk bird'
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_1_2, 5_1_2) )
_lowerCAmelCase = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_1_2, 5_1_2) )
_lowerCAmelCase = pipe(
__magic_name__ , __magic_name__ , control_image=__magic_name__ , generator=__magic_name__ , output_type='np' , num_inference_steps=5_0 , strength=0.6 , )
_lowerCAmelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__UpperCamelCase : List[Any] = ["""text""", """image""", """audio"""]
def a_ ( _A ) -> Optional[int]:
"""simple docstring"""
snake_case__ = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_A , _A ):
inputs.append(create_inputs(_A ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def a_ ( _A ) -> Dict:
"""simple docstring"""
snake_case__ = []
for output in outputs:
if isinstance(_A , (str, AgentText) ):
output_types.append('text' )
elif isinstance(_A , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_A , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
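# Illustrative (follows directly from the branches above):
#   output_types(["a caption", torch.ones(16)]) -> ["text", "audio"]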
@is_tool_test
class __SCREAMING_SNAKE_CASE:
def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]:
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
snake_case__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCamelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
snake_case__ = create_inputs(self.tool.inputs )
snake_case__ = self.tool(*UpperCamelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case__ = [outputs]
self.assertListEqual(output_types(UpperCamelCase ) , self.tool.outputs )
def lowerCAmelCase_ ( self: int ) -> List[str]:
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
snake_case__ = create_inputs(self.tool.inputs )
snake_case__ = self.tool(*UpperCamelCase )
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = [outputs]
self.assertEqual(len(UpperCamelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCamelCase , self.tool.outputs ):
snake_case__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCamelCase , UpperCamelCase ) )
def lowerCAmelCase_ ( self: Any ) -> Optional[int]:
snake_case__ = create_inputs(self.tool.inputs )
snake_case__ = []
for _input, input_type in zip(UpperCamelCase , self.tool.inputs ):
if isinstance(UpperCamelCase , UpperCamelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case__ = self.tool(*UpperCamelCase )
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = [outputs]
self.assertEqual(len(UpperCamelCase ) , len(self.tool.outputs ) )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'deberta-v2'
def __init__( self , lowercase_=128_100 , lowercase_=1_536 , lowercase_=24 , lowercase_=24 , lowercase_=6_144 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=0 , lowercase_=0.02 , lowercase_=1e-7 , lowercase_=False , lowercase_=-1 , lowercase_=0 , lowercase_=True , lowercase_=None , lowercase_=0 , lowercase_="gelu" , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : Tuple = hidden_size
_snake_case : Tuple = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : Optional[Any] = initializer_range
_snake_case : int = relative_attention
_snake_case : Tuple = max_relative_positions
_snake_case : List[Any] = pad_token_id
_snake_case : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase_ ) == str:
_snake_case : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("|" )]
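            # e.g. the string "p2c|c2p" becomes the list ["p2c", "c2p"]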
_snake_case : Union[str, Any] = pos_att_type
_snake_case : List[str] = vocab_size
_snake_case : int = layer_norm_eps
_snake_case : int = kwargs.get("pooler_hidden_size" , lowercase_ )
_snake_case : Union[str, Any] = pooler_dropout
_snake_case : Union[str, Any] = pooler_hidden_act
class lowercase_ ( __snake_case ):
@property
def UpperCamelCase ( self ):
if self.task == "multiple-choice":
_snake_case : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
_snake_case : Dict = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def UpperCamelCase ( self ):
return 12
def UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 3 , lowercase_ = 40 , lowercase_ = 40 , lowercase_ = None , ):
_snake_case : Optional[int] = super().generate_dummy_inputs(preprocessor=lowercase_ , framework=lowercase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
        return dummy_inputs


import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
__SCREAMING_SNAKE_CASE : int = 'bert-base-cased'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'fp16'
__SCREAMING_SNAKE_CASE : str = 'bf16'
__SCREAMING_SNAKE_CASE : Optional[int] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Optional[int] = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowercase_ ):
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = f"""{i + 1}"""
_snake_case : int = strategy
with mockenv_context(**lowercase_ ):
_snake_case : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowercase_ ):
_snake_case : List[str] = self.dist_env.copy()
_snake_case : List[Any] = prefetch_policy
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowercase_ ):
_snake_case : str = self.dist_env.copy()
_snake_case : List[str] = state_dict_type
with mockenv_context(**lowercase_ ):
_snake_case : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def UpperCamelCase ( self ):
_snake_case : Tuple = AutoModel.from_pretrained(lowercase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_snake_case : List[str] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_snake_case : str = "2000"
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_snake_case : str = self.dist_env.copy()
_snake_case : Tuple = "TRANSFORMER_BASED_WRAP"
_snake_case : Union[str, Any] = "T5Layer"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(lowercase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_snake_case : str = self.dist_env.copy()
_snake_case : Any = "SIZE_BASED_WRAP"
_snake_case : str = "0"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_snake_case : Union[str, Any] = self.dist_env.copy()
_snake_case : int = mp_dtype
with mockenv_context(**lowercase_ ):
_snake_case : str = Accelerator()
if mp_dtype == "fp16":
_snake_case : List[str] = torch.floataa
elif mp_dtype == "bf16":
_snake_case : Any = torch.bfloataa
_snake_case : Dict = MixedPrecision(param_dtype=lowercase_ , reduce_dtype=lowercase_ , buffer_dtype=lowercase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowercase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowercase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowercase_ )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_snake_case : Union[str, Any] = self.dist_env.copy()
_snake_case : Tuple = str(lowercase_ ).lower()
with mockenv_context(**lowercase_ ):
_snake_case : Dict = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowercase_ ) )
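    # Illustrative sketch (not part of the original suite): the tests above drive
    # FullyShardedDataParallelPlugin purely through environment variables. A
    # minimal standalone version of the pattern, assuming accelerate's
    # FSDP_SHARDING_STRATEGY variable name, would be:
    #
    #     env = dict(
    #         ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999",
    #         RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", FSDP_SHARDING_STRATEGY="1",
    #     )
    #     with mockenv_context(**env):
    #         plugin = FullyShardedDataParallelPlugin()  # strategy 1 == FULL_SHARD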
@require_fsdp
@require_multi_gpu
@slow
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Dict = 0.82
_snake_case : str = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
_snake_case : Tuple = {
"multi_gpu_fp16": 3_200,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_000,
"fsdp_full_shard_transformer_based_wrap_fp16": 1_900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_snake_case : Tuple = 160
_snake_case : Optional[int] = 160
_snake_case : Optional[Any] = inspect.getfile(accelerate.test_utils )
_snake_case : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = os.path.join(self.test_scripts_folder , "test_performance.py" )
_snake_case : int = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
_snake_case : str = cmd.copy()
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
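    # For reference, a representative command assembled by the loop above
    # (output path illustrative):
    #   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
    #     --use_fsdp --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
    #     --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
    #     --fsdp_transformer_layer_cls_to_wrap=BertLayer \
    #     test_performance.py --output_dir=/tmp/out --performance_lower_bound=0.82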
def UpperCamelCase ( self ):
_snake_case : Tuple = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
_snake_case : str = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(lowercase_ ):
_snake_case : str = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_snake_case : int = len(lowercase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_snake_case : int = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
_snake_case : Union[str, Any] = cmd_config[:-1]
_snake_case : Dict = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def UpperCamelCase ( self ):
_snake_case : List[Any] = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
_snake_case : Any = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_snake_case : Tuple = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() ) | 580 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__UpperCamelCase ) ,'Tatoeba directory does not exist.' )
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self ):
"""simple docstring"""
snake_case_ :List[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=a )
@slow
def _a ( self ):
"""simple docstring"""
self.resolver.convert_models(["heb-eng"] )
@slow
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :Optional[int] = self.resolver.write_model_card("opus-mt-he-en" , dry_run=a )
assert mmeta["long_pair"] == "heb-eng"
| 584 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two implicants that differ in exactly one bit; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)  # merged implicants feed the next round
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Render each minterm as a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether a prime implicant covers a minterm (mismatches only at '_')."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one implicant mark that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining minterms with the implicant covering the most.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart of prime implicants (rows) against minterms (columns)."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
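# Worked example for the pieces above: decimal_to_binary(3, [1, 5]) yields
# ["001", "101"]; compare_string("001", "101") differs in exactly one bit and
# merges into "_01", so check(["001", "101"]) returns ["_01"].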
| 584 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase_ = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->str:
"""simple docstring"""
__magic_name__ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__magic_name__ : Optional[Any] = int(re.match(r'''.*layer_(\d*).*''', UpperCAmelCase )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
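# Worked example (names illustrative): for a shard file named
# "layer_04-model_00-model_states.pt" the regex extracts 4, the offset yields
# 4 - 3 = 1, and a key such as "input_layernorm.weight" comes back as
# "h.1.input_layernorm.weight".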
def lowerCAmelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
__magic_name__ : Optional[int] = re.search(r'''[^\d](\d+)$''', str(UpperCAmelCase ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
__magic_name__ : Tuple = int(bit_search.groups()[0] )
return bit_size // 8
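# Worked examples for the helper above: torch.float16 -> 16 // 8 = 2 bytes,
# torch.float32 -> 32 // 8 = 4 bytes, torch.bool -> special-cased to 1/8 byte,
# so shard sizes can be accumulated as numel() * bytes_per_element.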
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
if bloom_config_file == "":
__magic_name__ : Union[str, Any] = BloomConfig()
else:
__magic_name__ : List[Any] = BloomConfig.from_json_file(UpperCAmelCase )
if shard_model:
__magic_name__ : Tuple = os.listdir(UpperCAmelCase )
__magic_name__ : int = sorted(filter(lambda UpperCAmelCase : s.startswith('''layer''' ) and "model_00" in s, UpperCAmelCase ) )
__magic_name__ : Optional[Any] = {'''weight_map''': {}, '''metadata''': {}}
__magic_name__ : int = 0
__magic_name__ : List[str] = None
__magic_name__ : Dict = BloomConfig()
for j, file in enumerate(UpperCAmelCase ):
print('''Processing file: {}'''.format(UpperCAmelCase ) )
__magic_name__ : Tuple = None
for i in range(UpperCAmelCase ):
# load all TP files
__magic_name__ : Union[str, Any] = file.replace('''model_00''', F'''model_0{i}''' )
__magic_name__ : List[str] = torch.load(os.path.join(UpperCAmelCase, UpperCAmelCase ), map_location='''cpu''' )
# Rename keys in the transformers names
__magic_name__ : Optional[Any] = list(temp.keys() )
for key in keys:
__magic_name__ : Optional[Any] = temp.pop(UpperCAmelCase )
if tensors is None:
__magic_name__ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__magic_name__ : List[Any] = torch.cat([tensors[key], temp[key]], dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__ : Dict = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase, os.path.join(
UpperCAmelCase, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ), str(len(UpperCAmelCase ) ).zfill(5 ) ), ), )
for key in tensors.keys():
__magic_name__ : Tuple = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__magic_name__ : str = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ), str(len(UpperCAmelCase ) ).zfill(5 ) )
__magic_name__ : List[str] = BloomConfig()
__magic_name__ : List[str] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__magic_name__ : Tuple = total_size
with open(UpperCAmelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase, WEIGHTS_NAME + '''.index.json''' ), '''w''', encoding='''utf-8''' ) as f:
__magic_name__ : str = json.dumps(UpperCAmelCase, indent=2, sort_keys=UpperCAmelCase ) + '''\n'''
f.write(UpperCAmelCase )
else:
__magic_name__ : Dict = BloomModel(UpperCAmelCase )
__magic_name__ : Tuple = os.listdir(UpperCAmelCase )
__magic_name__ : Optional[Any] = sorted(filter(lambda UpperCAmelCase : s.startswith('''layer''' ) and "model_00" in s, UpperCAmelCase ) )
__magic_name__ : List[str] = None
for i, file in enumerate(UpperCAmelCase ):
__magic_name__ : str = None
for i in range(UpperCAmelCase ):
# load all TP files
__magic_name__ : List[str] = file.replace('''model_00''', F'''model_0{i}''' )
__magic_name__ : int = torch.load(os.path.join(UpperCAmelCase, UpperCAmelCase ), map_location='''cpu''' )
# Rename keys in the transformers names
__magic_name__ : Dict = list(temp.keys() )
for key in keys:
__magic_name__ : Tuple = temp.pop(UpperCAmelCase )
if tensors is None:
__magic_name__ : Any = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__magic_name__ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__magic_name__ : str = torch.cat([tensors[key], temp[key]], dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__magic_name__ : Optional[int] = tensors[key] / pretraining_tp
__magic_name__ : int = model.load_state_dict(UpperCAmelCase, strict=UpperCAmelCase )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
__magic_name__ : Any = set(other_keys.missing_keys )
else:
__magic_name__ : Optional[int] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(UpperCAmelCase, exist_ok=UpperCAmelCase )
__magic_name__ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__magic_name__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
__magic_name__ : List[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict(), UpperCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCAmelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
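# Shape of the sharded-save artifacts produced above (entries illustrative):
# one "pytorch_model_00001-of-000NN.bin" file per checkpoint shard, plus
# "pytorch_model.bin.index.json" holding {"metadata": {"total_size": ...},
# "weight_map": {"h.0.self_attention.dense.weight": "pytorch_model_00001-of-000NN.bin", ...}}.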
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 336 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowercase_ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
lowercase_ = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
lowercase_ = '''|'''.join(sys.argv[1:])
lowercase_ = re.compile(rf"^({joined_dirs}).*?\.py$")
lowercase_ = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
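# Worked example (hypothetical paths): invoked as
#   python ./utils/get_modified_files.py utils src tests
# the regex becomes r"^(utils|src|tests).*?\.py$", so "src/foo/bar.py" is kept
# while "docs/notes.md" and "setup.py" are filtered out.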
| 336 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
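# Usage sketch for the automaton above:
#   auto = Automaton(["what", "hat", "ver"])
#   auto.search_in("whatever")  # -> {"what": [0], "hat": [1], "ver": [5]}
# Each value lists the 0-based start index of every occurrence of that keyword.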
| 71 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowercase = logging.get_logger(__name__)
_lowercase = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __UpperCamelCase ( a : str ) ->str:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case = model_type_to_module_name(a )
snake_case = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a , a )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a , '''__name__''' , a ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case = importlib.import_module('''transformers''' )
if hasattr(a , a ):
return getattr(a , a )
return None
def __UpperCamelCase ( a : Union[str, os.PathLike] , a : Optional[Union[str, os.PathLike]] = None , a : bool = False , a : bool = False , a : Optional[Dict[str, str]] = None , a : Optional[Union[bool, str]] = None , a : Optional[str] = None , a : bool = False , **a : Any , ) ->Any:
snake_case = get_file_from_repo(
a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(a , encoding='''utf-8''' ) as reader:
return json.load(a )
class _lowercase :
def __init__( self ) -> Tuple:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A__ )
def UpperCamelCase ( cls , A__ , **A__ ) -> List[str]:
snake_case = kwargs.pop('''config''' , A__ )
snake_case = kwargs.pop('''trust_remote_code''' , A__ )
snake_case = True
snake_case , snake_case = ImageProcessingMixin.get_image_processor_dict(A__ , **A__ )
snake_case = config_dict.get('''image_processor_type''' , A__ )
snake_case = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
snake_case = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case = config_dict.pop('''feature_extractor_type''' , A__ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
snake_case = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
snake_case = config_dict['''auto_map''']['''AutoFeatureExtractor''']
snake_case = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(A__ , A__ ):
snake_case = AutoConfig.from_pretrained(A__ , **A__ )
# It could be in `config.image_processor_type``
snake_case = getattr(A__ , '''image_processor_type''' , A__ )
if hasattr(A__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
snake_case = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
snake_case = image_processor_class_from_name(A__ )
snake_case = image_processor_auto_map is not None
snake_case = image_processor_class is not None or type(A__ ) in IMAGE_PROCESSOR_MAPPING
snake_case = resolve_trust_remote_code(
A__ , A__ , A__ , A__ )
if has_remote_code and trust_remote_code:
snake_case = get_class_from_dynamic_module(
A__ , A__ , **A__ )
snake_case = kwargs.pop('''code_revision''' , A__ )
if os.path.isdir(A__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(A__ , **A__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(A__ , **A__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(A__ ) in IMAGE_PROCESSOR_MAPPING:
snake_case = IMAGE_PROCESSOR_MAPPING[type(A__ )]
return image_processor_class.from_dict(A__ , **A__ )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCamelCase ( A__ , A__ ) -> Tuple:
IMAGE_PROCESSOR_MAPPING.register(A__ , A__ )
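# Typical use of the class above (checkpoint id illustrative):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# The mapping tables at the top resolve the config's model type ("vit") to
# ViTImageProcessor.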
| 342 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price shown on Yahoo Finance India for `symbol`."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 706 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__magic_name__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__magic_name__ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def A_ ( self ):
snake_case__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
snake_case__ = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def A_ ( self ):
snake_case__ = "src/transformers"
shutil.rmtree(self.transformer_dir )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
snake_case__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
snake_case__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
snake_case__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
snake_case__ = black.format_str(lowerCamelCase , mode=lowerCamelCase )
snake_case__ = os.path.join(self.transformer_dir , "new_code.py" )
with open(lowerCamelCase , "w" , newline="\n" ) as f:
f.write(lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase )
with open(lowerCamelCase , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase )
def A_ ( self ):
snake_case__ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def A_ ( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , lowerCamelCase ) , )
# Copy consistency with a really long name
snake_case__ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , lowerCamelCase , lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , lowerCamelCase , overwrite_result=re.sub("Bert" , "TestModel" , lowerCamelCase ) , )
def A_ ( self ):
snake_case__ = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
snake_case__ , snake_case__ = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] )
self.assertFalse(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
snake_case__ , snake_case__ = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] )
        # Check whether the number of models matches the one in README.md after conversion.
self.assertTrue(lowerCamelCase )
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case__ , snake_case__ = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase , lowerCamelCase )
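        # The convention under test, for reference: a transformers class carries
        # a marker comment such as
        #   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
        # and utils/check_copies.py checks that the body still matches the source
        # after the Bert->TestModel substitution, rewriting it when overwrite is set.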
| 530 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCamelCase ( __snake_case , unittest.TestCase):
__lowerCamelCase = KandinskyVaaControlnetImgaImgPipeline
__lowerCamelCase = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowerCamelCase = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowerCamelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowerCamelCase = False
@property
def A (self ):
"""simple docstring"""
return 3_2
@property
def A (self ):
"""simple docstring"""
return 3_2
@property
def A (self ):
"""simple docstring"""
return self.time_input_dim
@property
def A (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A (self ):
"""simple docstring"""
return 1_0_0
@property
def A (self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A__ = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def A (self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A (self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def A (self ):
"""simple docstring"""
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A__ = DDIMScheduler(**lowerCamelCase__ )
A__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A (self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create init_image
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create hint
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("""mps""" ):
A__ = torch.manual_seed(lowerCamelCase__ )
else:
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
A__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def A (self ):
"""simple docstring"""
A__ = """cpu"""
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
A__ = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase):
def A (self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self ):
"""simple docstring"""
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A__ = init_image.resize((5_1_2, 5_1_2) )
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A__ = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 2_5_5.0
A__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
A__ = """A robot, 4k photo"""
A__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
A__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
A__ = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ ,A__ = pipe_prior(
lowerCamelCase__ , image=lowerCamelCase__ , strength=0.8_5 , generator=lowerCamelCase__ , negative_prompt="""""" , ).to_tuple()
A__ = pipeline(
image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
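        # Flow summary for the slow test above: the prior pipeline maps
        # (prompt, init image, strength) to (image_embeds, negative_image_embeds);
        # the controlnet img2img pipeline then denoises the resized init image
        # conditioned on those embeddings plus a depth "hint" scaled to [0, 1]
        # and laid out as a BCHW tensor.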
| 574 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
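# Worked example: solution(30) counts the semiprimes below 30, namely
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, and returns 10.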
if __name__ == "__main__":
print(F'{solution() = }')
| 574 | 1 |
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
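# Worked example: solution(12) finds a=3, b=4, c=5 (the only Pythagorean triplet
# with perimeter 12) and returns the product 3 * 4 * 5 = 60.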
if __name__ == "__main__":
print(F'''{solution() = }''') | 717 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : int = OpenAIGPTTokenizer
snake_case__ : Tuple = OpenAIGPTTokenizerFast
snake_case__ : Tuple = True
snake_case__ : Union[str, Any] = False
def _A ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase :Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCamelCase :List[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCamelCase :Dict = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__lowerCamelCase ) )
def _A ( self : List[Any] , __lowerCamelCase : List[str] ):
return "lower newer", "lower newer"
def _A ( self : List[str] ):
UpperCamelCase :Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase :List[Any] = """lower"""
UpperCamelCase :Any = ["""low""", """er</w>"""]
UpperCamelCase :List[str] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = tokens + ["""<unk>"""]
UpperCamelCase :List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
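        # BPE walk-through for the toy vocab above: "lower" splits into the
        # characters l o w e r</w>; the merges "l o" -> "lo", "lo w" -> "low"
        # and "e r</w>" -> "er</w>" then apply, giving ["low", "er</w>"],
        # whose ids in the vocab ordering are [14, 15].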
def _A ( self : Dict , __lowerCamelCase : List[Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# Simple input
UpperCamelCase :Optional[int] = """This is a simple input"""
UpperCamelCase :Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCamelCase :Optional[int] = ("""This is a simple input""", """This is a pair""")
UpperCamelCase :int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" , )
def _A ( self : Dict ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a ):
pass
| 590 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
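# A compact sketch of the lazy-import pattern used above (the module and symbol names
# below are made-up placeholders): type checkers resolve the real imports under
# TYPE_CHECKING, while at runtime the entry in sys.modules is replaced by a _LazyModule
# that only imports a submodule when one of its attributes is first accessed.
import sys
from typing import TYPE_CHECKING

_import_structure = {"configuration_foo": ["FooConfig"]}  # hypothetical submodule/symbol

if TYPE_CHECKING:
    from .configuration_foo import FooConfig  # seen by type checkers only
else:
    from transformers.utils import _LazyModule  # same helper as the file above

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)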
| 353 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
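# A quick sanity-check sketch (not in the original file): bisection on g(x) = x**2 - 2
# over [1, 2] converges to sqrt(2) within the 10**-7 tolerance used above, since g
# changes sign on that interval.
def g(x: float) -> float:
    return x**2 - 2


assert abs(bisection(g, 1, 2) - 2**0.5) < 1e-6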
| 353 | 1 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        '''simple docstring'''
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        '''simple docstring'''
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        '''simple docstring'''
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
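# A minimal end-to-end sketch (not part of the original file): two parties exchange
# public keys and must derive the same SHA-256 shared secret.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
    alice.generate_public_key()
)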
| 40 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
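# A small usage sketch (not part of the original file) on a toy dataset: the nearest
# neighbour of [0, 0] among the three points is [0, 0] itself at distance 0, and two
# parallel vectors have cosine similarity 1.0.
_dataset = np.array([[0, 0], [1, 1], [2, 2]], dtype=float)
_queries = np.array([[0, 0]], dtype=float)
print(similarity_search(_dataset, _queries))  # [[[0.0, 0.0], 0.0]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0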
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 479 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
    pass
| 479 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """simple docstring"""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class a(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        """simple docstring"""
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        """simple docstring"""
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """simple docstring"""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """simple docstring"""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """simple docstring"""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """simple docstring"""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """simple docstring"""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )

        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        """simple docstring"""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
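# A minimal usage sketch (not part of the test file above) of the prepare/backward flow
# these tests exercise; the model, optimizer, and data below are illustrative.
from accelerate import Accelerator
import torch

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

batch = torch.randn(8, 2, device=accelerator.device)
loss = model(batch).sum()
accelerator.backward(loss)  # replaces loss.backward() so gradients work on any setup
optimizer.step()
optimizer.zero_grad()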
| 715 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
 - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
 - **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """simple docstring"""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
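# A quick cross-check sketch (not in the original file): recall computed by hand as
# TP / (TP + FN) matches the metric's first docstring example.
refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)  # 2
fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)  # 1
assert tp / (tp + fn) == 2 / 3  # matches {'recall': 0.6666666666666666}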
| 146 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=2_5_6,
        semantic_loss_ignore_index=2_5_5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 1_2
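# A short usage sketch (not in the original file), assuming the config class above is
# importable as SegformerConfig: the defaults follow the SegFormer-b0 stage sizes.
config = SegformerConfig()
assert config.model_type == "segformer"
assert len(config.hidden_sizes) == config.num_encoder_blocks  # one hidden size per stage
print(config.decoder_hidden_size)  # 256 by default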
| 170 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f'State: {state}')

    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
| 170 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread('image_data/lena.jpg', 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow('negative of original image', img)
    waitKey(0)
    destroyAllWindows()
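# A vectorized alternative sketch (assumption: img is a uint8 BGR array as returned by
# cv2.imread): numpy broadcasting inverts every channel at once, avoiding the
# Python-level double loop above.
import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img  # per-pixel, per-channel negative in one operation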
| 712 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """simple docstring"""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """simple docstring"""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
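# A tiny worked check (not in the original file): with scale == distance == 10 and
# z == 3, convert_to_2d maps x through x * 10 / (3 + 10) * 10, so x = 1 projects to
# 100/13 (about 7.6923) and y = 2 to 200/13.
px, py = convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
assert abs(px - 100.0 / 13.0) < 1e-9 and abs(py - 200.0 / 13.0) < 1e-9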
| 210 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 210 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_024}


class BartphoTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(unk_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(unk_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(token)} \n''')

        return out_vocab_file, out_monolingual_vocab_file
| 595 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 595 | 1 |
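# The tokenizer above layers a handful of fairseq special tokens on top of a
# SentencePiece vocabulary by shifting every SentencePiece id by a fixed offset.
# A minimal sketch of that id-mapping technique, with a plain dict standing in
# for the SentencePiece model (a toy vocab, not the real one):
FAIRSEQ_TOKENS = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
SP_PIECES = {"<unk>": 0, "▁hello": 1, "▁world": 2}
OFFSET = len(FAIRSEQ_TOKENS)

def token_to_id(token: str) -> int:
    if token in FAIRSEQ_TOKENS:
        return FAIRSEQ_TOKENS[token]
    sp_id = SP_PIECES.get(token, 0)
    # SentencePiece returns 0 for unknown pieces; route that to the fairseq unk id
    return FAIRSEQ_TOKENS["<unk>"] if sp_id == 0 else OFFSET + sp_id

def id_to_token(index: int) -> str:
    fairseq_ids = {v: k for k, v in FAIRSEQ_TOKENS.items()}
    sp_ids = {v: k for k, v in SP_PIECES.items()}
    return fairseq_ids.get(index) or sp_ids[index - OFFSET]

assert token_to_id("▁world") == OFFSET + 2 and id_to_token(OFFSET + 2) == "▁world"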
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = '''naver-clova-ix/donut-base-finetuned-docvqa'''
__lowercase : Any = (
        '''This is a tool that answers a question about a document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
__lowercase : Optional[int] = '''document_qa'''
__lowercase : Optional[Any] = AutoProcessor
__lowercase : Optional[Any] = VisionEncoderDecoderModel
__lowercase : str = ['''image''', '''text''']
__lowercase : Dict = ['''text''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCAmelCase__ : Tuple = task_prompt.replace("""{user_input}""" ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.pre_processor.tokenizer(
__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ).input_ids
lowerCAmelCase__ : Dict = self.pre_processor(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) ,decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) ,max_length=self.model.decoder.config.max_position_embeddings ,early_stopping=__UpperCAmelCase ,pad_token_id=self.pre_processor.tokenizer.pad_token_id ,eos_token_id=self.pre_processor.tokenizer.eos_token_id ,use_cache=__UpperCAmelCase ,num_beams=1 ,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,return_dict_in_generate=__UpperCAmelCase ,).sequences
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Optional[int] = self.pre_processor.batch_decode(__UpperCAmelCase )[0]
lowerCAmelCase__ : str = sequence.replace(self.pre_processor.tokenizer.eos_token ,"""""" )
lowerCAmelCase__ : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token ,"""""" )
lowerCAmelCase__ : Optional[Any] = re.sub(R"""<.*?>""" ,"""""" ,__UpperCAmelCase ,count=1 ).strip() # remove first task start token
        lowerCAmelCase__ : int = self.pre_processor.token2json(__UpperCAmelCase )
return sequence["answer"]
| 565 |
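# The tool above wraps Donut's docvqa prompt format and then strips special
# tokens plus the leading task tag from the generated sequence. A model-free
# sketch of that decode-side cleanup (pure string handling):
import re

def clean_donut_output(sequence: str, eos: str = "</s>", pad: str = "<pad>") -> str:
    sequence = sequence.replace(eos, "").replace(pad, "")
    # drop only the first task start token, e.g. "<s_docvqa>"
    return re.sub(r"<.*?>", "", sequence, count=1).strip()

print(clean_donut_output("<s_docvqa><s_question>total?</s_question><s_answer>42</s_answer></s>"))
# -> "<s_question>total?</s_question><s_answer>42</s_answer>"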
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowerCAmelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = Github(os.environ["""GITHUB_TOKEN"""] )
lowerCAmelCase__ : Optional[Any] = g.get_repo("""huggingface/diffusers""" )
lowerCAmelCase__ : Tuple = repo.get_issues(state="""open""" )
for issue in open_issues:
        lowerCAmelCase__ : Optional[int] = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
lowerCAmelCase__ : Any = comments[0] if len(UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 565 | 1 |
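# Every decision in the bot above keys off two timedeltas: days since the last
# update and days since creation. A hedged, API-free sketch of that gating
# logic, following the branch order of the snippet:
from datetime import datetime, timedelta

def stale_action(updated_at: datetime, created_at: datetime, last_comment_by_bot: bool) -> str:
    idle = (datetime.utcnow() - updated_at).days
    age = (datetime.utcnow() - created_at).days
    if last_comment_by_bot and idle > 7 and age >= 30:
        return "close"
    if idle > 23 and age >= 30:
        return "post stale notice"
    return "leave alone"

now = datetime.utcnow()
print(stale_action(now - timedelta(days=40), now - timedelta(days=60), False))  # post stale notice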
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
A_ , A_ : Union[str, Any] = array[indexa], array[indexa]
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if length > 1:
A_ : Any = int(length / 2 )
for i in range(_UpperCAmelCase , low + middle ):
comp_and_swap(_UpperCAmelCase , _UpperCAmelCase , i + middle , _UpperCAmelCase )
bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
bitonic_merge(_UpperCAmelCase , low + middle , _UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if length > 1:
A_ : str = int(length / 2 )
bitonic_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
bitonic_sort(_UpperCAmelCase , low + middle , _UpperCAmelCase , 0 )
bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase_ : Dict = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase_ : List[str] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
| 302 |
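# A clean reference version of the compare-and-swap step above, with the two
# index arguments named explicitly (direction 1 = ascending, 0 = descending,
# as in the driver code):
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]

data = [3, 1]
comp_and_swap(data, 0, 1, 1)
assert data == [1, 3]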
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = tempfile.mkdtemp()
# fmt: off
A_ : List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A_ : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
A_ : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A_ : Optional[int] = {'unk_token': '<unk>'}
A_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case_ ) )
A_ : str = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[str] = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
A_ : int = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.get_tokenizer()
A_ : Optional[Any] = self.get_rust_tokenizer()
A_ : Any = self.get_image_processor()
A_ : Any = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
A_ : List[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
A_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A_ : List[str] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
A_ : Dict = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Tuple = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : List[str] = self.prepare_image_inputs()
A_ : Dict = image_processor(snake_case_ , return_tensors='np' )
A_ : List[Any] = processor(images=snake_case_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Tuple = 'lower newer'
A_ : List[str] = processor(text=snake_case_ )
A_ : Any = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : List[str] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Optional[Any] = 'lower newer'
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Optional[int] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[str] = self.prepare_image_inputs()
A_ : int = processor(images=snake_case_ , visual_prompt=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Tuple = processor.batch_decode(snake_case_ )
A_ : Optional[int] = tokenizer.batch_decode(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
| 302 | 1 |
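# The test above builds a throwaway BPE tokenizer by writing vocab.json and
# merges.txt into a temp dir before each test. A minimal standalone sketch of
# that fixture pattern (toy vocab, not CLIP's real files):
import json, os, tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    vocab = {tok: i for i, tok in enumerate(["l", "o", "w", "lo", "low</w>", "<unk>"])}
    with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
        json.dump(vocab, fp)
    with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
        fp.write("\n".join(["#version: 0.2", "l o", "lo w</w>"]))
    print(sorted(os.listdir(tmpdir)))  # ['merges.txt', 'vocab.json']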
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class UpperCAmelCase ( __lowerCamelCase ):
a__: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a__: ClassVar[Features] = Features({"""audio""": Audio()} )
a__: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
a__: str = "audio"
a__: str = "transcription"
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple ):
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , lowerCAmelCase ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
lowercase : str = copy.deepcopy(self )
lowercase : List[Any] = self.input_schema.copy()
lowercase : Optional[Any] = features[self.audio_column]
lowercase : str = input_schema
return task_template
@property
def _lowerCAmelCase ( self : Dict ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 583 |
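# The align method above returns a deep copy of the template whose input
# schema adopts the dataset's concrete Audio feature (e.g. its sampling rate).
# A hedged sketch of that copy-and-patch pattern on a plain frozen dataclass
# (object.__setattr__ is needed because the instance is frozen):
import copy
from dataclasses import dataclass, field

@dataclass(frozen=True)
class Template:
    input_schema: dict = field(default_factory=lambda: {"audio": "Audio()"})

def align(template: Template, features: dict) -> Template:
    patched = copy.deepcopy(template)
    schema = dict(patched.input_schema)
    schema["audio"] = features["audio"]
    object.__setattr__(patched, "input_schema", schema)
    return patched

print(align(Template(), {"audio": "Audio(sampling_rate=16000)"}).input_schema)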
class UpperCAmelCase :
def __init__( self : Union[str, Any] , lowerCAmelCase : str = "" , lowerCAmelCase : bool = False ):
# Mapping from the first character of the prefix of the node
lowercase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowercase : Union[str, Any] = is_leaf
lowercase : Optional[int] = prefix
def _lowerCAmelCase ( self : str , lowerCAmelCase : str ):
lowercase : Optional[int] = 0
for q, w in zip(self.prefix , lowerCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowerCAmelCase ( self : int , lowerCAmelCase : list[str] ):
for word in words:
self.insert(lowerCAmelCase )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase : str ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase : List[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase : Tuple = RadixNode(prefix=lowerCAmelCase , is_leaf=lowerCAmelCase )
else:
lowercase : Union[str, Any] = self.nodes[word[0]]
lowercase , lowercase , lowercase : Any = incoming_node.match(
lowerCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase )
            # Case 4: The node's prefix only partially matches the word
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase : Tuple = remaining_prefix
lowercase : Tuple = self.nodes[matching_string[0]]
lowercase : Optional[Any] = RadixNode(lowerCAmelCase , lowerCAmelCase )
lowercase : Any = aux_node
if remaining_word == "":
lowercase : Tuple = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase : str ):
lowercase : List[Any] = self.nodes.get(word[0] , lowerCAmelCase )
if not incoming_node:
return False
else:
lowercase , lowercase , lowercase : Dict = incoming_node.match(
lowerCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase : str ):
lowercase : Optional[int] = self.nodes.get(word[0] , lowerCAmelCase )
if not incoming_node:
return False
else:
lowercase , lowercase , lowercase : Tuple = incoming_node.match(
lowerCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase : List[Any] = list(self.nodes.values() )[0]
lowercase : List[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase : List[str] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase : Dict = False
# If there is 1 edge, we merge it with its child
else:
lowercase : Optional[int] = list(incoming_node.nodes.values() )[0]
lowercase : Dict = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase : Dict = merging_node.nodes
return True
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int = 0 ):
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCamelCase_ ( ):
lowercase : Optional[int] = '''banana bananas bandana band apple all beast'''.split()
lowercase : str = RadixNode()
root.insert_many(UpperCAmelCase_ )
assert all(root.find(UpperCAmelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def lowerCamelCase_ ( ):
assert test_trie()
def lowerCamelCase_ ( ):
lowercase : List[str] = RadixNode()
lowercase : Optional[int] = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(UpperCAmelCase_ )
print('''Words:''' , UpperCAmelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 583 | 1 |
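# Every operation in the radix tree above hinges on match(), which splits a
# node prefix and an incoming word into (common part, leftover prefix,
# leftover word). A standalone sketch of that three-way split:
def match(prefix: str, word: str) -> tuple[str, str, str]:
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

assert match("banana", "bandana") == ("ban", "ana", "dana")
assert match("band", "bandana") == ("band", "", "ana")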
"""simple docstring"""
A__ : Dict= [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : Union[str, Any]= [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : Optional[int]= {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
assert len(str(SCREAMING_SNAKE_CASE ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
UpperCamelCase__ = year // 1_00
UpperCamelCase__ = (5 * (century % 4) + 2) % 7
UpperCamelCase__ = year % 1_00
UpperCamelCase__ = centurian % 12
UpperCamelCase__ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase__ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
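# Sanity check for the leap-year branch above: the doomsday result for a date
# should agree with the standard library (datetime.weekday has Monday == 0,
# hence the reordered name table here):
from datetime import date

names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
for y, m, d in [(2000, 1, 1), (1900, 1, 1), (2024, 2, 29)]:
    print(y, m, d, "->", names[date(y, m, d).weekday()])
# 2000-01-01 -> Saturday, 1900-01-01 -> Monday, 2024-02-29 -> Thursday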
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
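# One pattern the test class above exercises is graph/eager parity: wrap a
# callable in tf.function and assert identical outputs. A minimal, model-free
# sketch (requires only tensorflow):
import tensorflow as tf

def double(x):
    return x * 2

compiled = tf.function(double)
x = tf.constant([1, 2, 3])
assert bool(tf.reduce_all(double(x) == compiled(x)))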
'''simple docstring'''
__snake_case : str = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__snake_case : Dict = ['a', 'b', 'c', 'd', 'e']
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Any, __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
A__ : Union[str, Any] =start
# add current to visited
visited.append(__snake_case )
A__ : Dict =edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
A__ : Any =topological_sort(__snake_case, __snake_case, __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
A__ : List[str] =topological_sort(__snake_case, __snake_case, __snake_case )
# return sort
return sort
if __name__ == "__main__":
__snake_case : Optional[int] = topological_sort('a', [], [])
print(sort)
| 215 |
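# The DFS above appends each vertex only after all of its neighbors, so the
# list it returns is a reverse topological order. A compact sketch (omitting
# the disconnected-vertex sweep) that reads the result back to front:
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}

def topo(current, visited, order):
    visited.append(current)
    for neighbor in edges[current]:
        if neighbor not in visited:
            topo(neighbor, visited, order)
    order.append(current)
    return order

print(topo("a", [], [])[::-1])  # ['a', 'b', 'e', 'd', 'c'] -- parents before children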
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 215 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE =shutil.get_terminal_size()
__SCREAMING_SNAKE_CASE ={"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str="" ):
sys.stdout.write(str(__SCREAMING_SNAKE_CASE ) + end )
sys.stdout.flush()
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]="" ):
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , __SCREAMING_SNAKE_CASE )
def lowercase__( ):
forceWrite('\r' )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowercase__( ):
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def lowercase__( ):
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
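# The helpers above emit raw ANSI escape sequences: "\u001b[<c>m...\u001b[0m"
# colors a span and "\033[<n><A-D>" moves the cursor. A tiny standalone demo:
import sys

def write_colored(text: str, color: int = 32) -> None:  # 32 = green
    sys.stdout.write(f"\u001b[{color}m{text}\u001b[0m\n")

def move_cursor_up(lines: int) -> None:
    sys.stdout.write(f"\033[{lines}A")

write_colored("hello")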
| 715 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase_ : Optional[int] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__UpperCamelCase ,cache_dir=__UpperCamelCase )
lowercase_ : Dict = [t[-1] for t in os.walk(os.path.join(__UpperCamelCase ,os.listdir(__UpperCamelCase )[0] ,'snapshots' ) )]
lowercase_ : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__UpperCamelCase )
lowercase_ : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : Optional[Any] = jax.random.PRNGKey(0 )
lowercase_ : Optional[int] = 4
lowercase_ : int = jax.device_count()
lowercase_ : Optional[int] = num_samples * [prompt]
lowercase_ : List[Any] = pipeline.prepare_inputs(__UpperCamelCase )
# shard inputs and rng
lowercase_ : Dict = replicate(__UpperCamelCase )
lowercase_ : Optional[Any] = jax.random.split(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = shard(__UpperCamelCase )
lowercase_ : List[Any] = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(__UpperCamelCase ,dtype=np.floataa ).sum() - 4_9947.875 ) < 5e-1
lowercase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__UpperCamelCase ) == num_samples
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='flax' ,safety_checker=__UpperCamelCase )
lowercase_ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : List[str] = jax.random.PRNGKey(0 )
lowercase_ : Tuple = 50
lowercase_ : Optional[Any] = jax.device_count()
lowercase_ : List[Any] = num_samples * [prompt]
lowercase_ : Union[str, Any] = pipeline.prepare_inputs(__UpperCamelCase )
# shard inputs and rng
lowercase_ : List[str] = replicate(__UpperCamelCase )
lowercase_ : Optional[int] = jax.random.split(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[Any] = shard(__UpperCamelCase )
lowercase_ : Optional[int] = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(__UpperCamelCase ,dtype=np.floataa ).sum() - 238_3808.2) ) < 5e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__UpperCamelCase )
lowercase_ : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : Dict = jax.random.PRNGKey(0 )
lowercase_ : Optional[int] = 50
lowercase_ : Tuple = jax.device_count()
lowercase_ : Dict = num_samples * [prompt]
lowercase_ : Any = pipeline.prepare_inputs(__UpperCamelCase )
# shard inputs and rng
lowercase_ : int = replicate(__UpperCamelCase )
lowercase_ : int = jax.random.split(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Union[str, Any] = shard(__UpperCamelCase )
lowercase_ : List[str] = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__UpperCamelCase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa )
lowercase_ : Optional[int] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : Optional[Any] = jax.random.PRNGKey(0 )
lowercase_ : Union[str, Any] = 50
lowercase_ : Optional[Any] = jax.device_count()
lowercase_ : Tuple = num_samples * [prompt]
lowercase_ : Union[str, Any] = pipeline.prepare_inputs(__UpperCamelCase )
# shard inputs and rng
lowercase_ : List[Any] = replicate(__UpperCamelCase )
lowercase_ : Union[str, Any] = jax.random.split(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[int] = shard(__UpperCamelCase )
lowercase_ : Optional[Any] = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__UpperCamelCase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Tuple = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,set_alpha_to_one=__UpperCamelCase ,steps_offset=1 ,)
lowercase_ , lowercase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,scheduler=__UpperCamelCase ,safety_checker=__UpperCamelCase ,)
lowercase_ : Optional[int] = scheduler.create_state()
lowercase_ : List[Any] = scheduler_state
lowercase_ : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : str = jax.random.PRNGKey(0 )
lowercase_ : str = 50
lowercase_ : List[Any] = jax.device_count()
lowercase_ : List[str] = num_samples * [prompt]
lowercase_ : Union[str, Any] = pipeline.prepare_inputs(__UpperCamelCase )
# shard inputs and rng
lowercase_ : List[Any] = replicate(__UpperCamelCase )
lowercase_ : int = jax.random.split(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : str = shard(__UpperCamelCase )
lowercase_ : str = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(__UpperCamelCase ,dtype=np.floataa ).sum() - 234_7693.5) ) < 5e-1
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowercase_ : Union[str, Any] = jax.device_count()
lowercase_ : List[str] = num_samples * [prompt]
lowercase_ : int = jax.random.split(jax.random.PRNGKey(0 ) ,__UpperCamelCase )
lowercase_ , lowercase_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__UpperCamelCase ,)
lowercase_ : str = replicate(__UpperCamelCase )
lowercase_ : int = pipeline.prepare_inputs(__UpperCamelCase )
lowercase_ : Optional[int] = shard(__UpperCamelCase )
lowercase_ : Tuple = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
lowercase_ : Union[str, Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowercase_ , lowercase_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__UpperCamelCase ,use_memory_efficient_attention=__UpperCamelCase ,)
lowercase_ : List[Any] = replicate(__UpperCamelCase )
lowercase_ : Union[str, Any] = pipeline.prepare_inputs(__UpperCamelCase )
lowercase_ : List[Any] = shard(__UpperCamelCase )
lowercase_ : Any = pipeline(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,jit=__UpperCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
lowercase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 477 | 0 |
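# The tests above all follow the same data-parallel recipe: replicate the
# params across devices, split one PRNG key per device, and shard the batch.
# A minimal sketch of that recipe with a toy pmapped function in place of the
# diffusion pipeline (runs on any device count):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = replicate({"scale": jnp.array(2.0)})      # copy params to every device
rngs = jax.random.split(jax.random.PRNGKey(0), n)  # one key per device
batch = shard(jnp.arange(4 * n, dtype=jnp.float32).reshape(4 * n, 1))

out = jax.pmap(lambda p, r, x: x * p["scale"])(params, rngs, batch)
print(out.shape)  # (device_count, 4, 1)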
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowercase ( ):
_a = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
_a = Dataset.from_dict(lowerCamelCase__ )
return dataset
class A ( a ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = get_dataset()
_a = make_duplicate_clusters(snake_case_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCAmelCase ( self ) -> int:
_a = get_dataset()
_a , _a = deduplicate_dataset(snake_case_ )
self.assertEqual(len(snake_case_ ) , 2 )
print(snake_case_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , snake_case_ )
| 131 |
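# The fixture above fabricates near-duplicates by repeating one token: "a " * 20
# and "a " * 30 produce identical shingle sets, so they must cluster at any
# reasonable threshold. A quick Jaccard check on 2-gram shingles:
def shingles(text: str, k: int = 2) -> set[str]:
    tokens = text.split()
    return {" ".join(tokens[i : i + k]) for i in range(len(tokens) - k + 1)}

a, b = shingles("a " * 20), shingles("a " * 30)
print(len(a & b) / len(a | b))  # 1.0 -- both reduce to the single shingle "a a"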
'''simple docstring'''
from math import pi, sqrt
def _lowercase ( lowerCamelCase__ : float ):
if num <= 0:
raise ValueError("math domain error" )
if num > 1_71.5:
raise OverflowError("math range error" )
elif num - int(lowerCamelCase__ ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _lowercase ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case : Union[str, Any] = 1.0
while num:
__snake_case : Optional[Any] = float(input("Gamma of: "))
print(f'''gamma({num}) = {gamma(num)}''')
print("\nEnter 0 to exit...")
| 131 | 1 |
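# Cross-check of the recursion above against the standard library for a few
# supported inputs (integers and half-integers only), with the sqrt(pi) base
# case written out explicitly:
import math

def gamma_halfint(num: float) -> float:
    if num == 0.5:
        return math.sqrt(math.pi)
    return 1.0 if num == 1 else (num - 1) * gamma_halfint(num - 1)

for x in (0.5, 1, 1.5, 4):
    assert math.isclose(gamma_halfint(x), math.gamma(x))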
"""simple docstring"""
def snake_case ( lowerCAmelCase_ = 1000 ) -> int:
_snake_case , _snake_case = 1, 1
_snake_case = 2
while True:
_snake_case = 0
_snake_case = fa + fa
_snake_case , _snake_case = fa, f
index += 1
for _ in str(lowerCAmelCase_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 404 |
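# len(str(f)) counts digits in one call instead of the snippet's inner loop.
# A compact equivalent, handy for cross-checking small cases:
def fib_index_with_digits(n: int) -> int:
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index

assert fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit Fibonacci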
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Tuple = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : int , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : str , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[Any] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 404 | 1 |
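# Each class above is a placeholder that raises a helpful ImportError the
# moment it is touched. A minimal sketch of the metaclass trick behind it
# (DummyOnnxPipeline is a made-up name for illustration):
class DummyObject(type):
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class DummyOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

try:
    DummyOnnxPipeline.from_pretrained
except ImportError as err:
    print(err)  # DummyOnnxPipeline requires the backends ['torch', 'transformers', 'onnx']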
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''linear'''
lowerCAmelCase = '''cosine'''
lowerCAmelCase = '''cosine_with_restarts'''
lowerCAmelCase = '''polynomial'''
lowerCAmelCase = '''constant'''
lowerCAmelCase = '''constant_with_warmup'''
lowerCAmelCase = '''piecewise_constant'''
def lowerCamelCase__ ( _lowercase , _lowercase = -1 ):
'''simple docstring'''
return LambdaLR(_lowercase , lambda _lowercase : 1 , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1.0 , _lowercase ) )
return 1.0
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = -1 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Tuple = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = rule_str.split(''':''' )
UpperCAmelCase_ : List[Any] = int(_lowercase )
UpperCAmelCase_ : Optional[int] = float(_lowercase )
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Optional[Any] = float(rule_list[-1] )
def create_rules_function(_lowercase , _lowercase ):
def rule_func(_lowercase ) -> float:
UpperCAmelCase_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ : Dict = create_rules_function(_lowercase , _lowercase )
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=-1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 0.5 , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
UpperCAmelCase_ : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowercase ) * 2.0 * progress )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
UpperCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowercase ) * progress) % 1.0) )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=1E-7 , _lowercase=1.0 , _lowercase=-1 ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ : Tuple = lr_init - lr_end
UpperCAmelCase_ : Tuple = num_training_steps - num_warmup_steps
UpperCAmelCase_ : List[str] = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ : str = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowercase , _lowercase , _lowercase )
__a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = 1 , _lowercase = 1.0 , _lowercase = -1 , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = SchedulerType(_lowercase )
UpperCAmelCase_ : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowercase , last_epoch=_lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowercase , step_rules=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowercase , num_warmup_steps=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , num_cycles=_lowercase , last_epoch=_lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , power=_lowercase , last_epoch=_lowercase , )
return schedule_func(
        _lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , last_epoch=_lowercase )
| 30 |
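# Every schedule above is a LambdaLR multiplier applied to the optimizer's base
# learning rate. A standalone sketch of the linear warmup + linear decay
# multiplier, checked at a few steps (warmup 10, total 100):
def linear_schedule(step: int, warmup: int = 10, total: int = 100) -> float:
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))

assert linear_schedule(0) == 0.0
assert linear_schedule(10) == 1.0
assert abs(linear_schedule(55) - 0.5) < 1e-9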
'''simple docstring'''
from statistics import mean, stdev
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 3 ):
__a : List[str] = min(SCREAMING_SNAKE_CASE__ )
__a : Tuple = max(SCREAMING_SNAKE_CASE__ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , SCREAMING_SNAKE_CASE__ ) for x in data]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 3 ):
__a : Dict = mean(SCREAMING_SNAKE_CASE__ )
__a : str = stdev(SCREAMING_SNAKE_CASE__ )
# standardize data
return [round((x - mu) / (sigma) , SCREAMING_SNAKE_CASE__ ) for x in data]
| 597 | 0 |
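# Quick usage of the two rescalers above: min-max maps the data onto [0, 1],
# while the z-score version centers on the sample mean with unit (sample)
# standard deviation:
from statistics import mean, stdev

data = [2.0, 4.0, 6.0, 8.0]
x_min, x_max = min(data), max(data)
print([round((x - x_min) / (x_max - x_min), 3) for x in data])  # [0.0, 0.333, 0.667, 1.0]
mu, sigma = mean(data), stdev(data)
print([round((x - mu) / sigma, 3) for x in data])  # symmetric around 0.0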
"""simple docstring"""
__lowerCAmelCase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : List[str] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in a red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
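# Illustrative invocation (the script file name is an assumption, not given above):
#   python sd_ipex_text2image.py --dpm --steps 20
# bfloat16 autocast plus channels-last keeps the whole pipeline on CPU-friendly
# IPEX kernels; drop --dpm to keep the pipeline's default scheduler.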
| 21 | 1 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    '''simple docstring'''

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
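# A minimal usage sketch (the checkpoint name is a public example, the audio
# array here is synthetic):
#
#   import numpy as np
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#   inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids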
| 167 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
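# Illustrative invocation (script name, checkpoint, and paths are placeholders):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt --output_dir ./ner-out --do_train --do_eval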
| 167 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """simple docstring"""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        # Create a temporary directory shared by all tests in the class.
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """simple docstring"""

    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    """simple docstring"""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    """simple docstring"""

    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
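# A minimal usage sketch of the helpers above (the test body is illustrative):
#
#   class MyTests(AccelerateTestCase):
#       @require_cuda
#       def test_gather_on_gpu(self):
#           t = torch.ones(1, device="cuda")
#           assert are_the_same_tensors(t)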
| 110 |
from collections.abc import Callable
class Heap:
    """A generic min-heap over [item, score] pairs with O(1) item lookup."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
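# A minimal usage sketch (illustrative): with key=lambda x: -x the heap behaves
# as a max-heap over the raw values, since it always surfaces the smallest score.
#
#   h = Heap(key=lambda x: -x)
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()      # [7, -37]  -> item 7 holds the largest value
#   h.extract_top()  # removes and returns [7, -37]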
| 110 | 1 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    # the AND gate outputs 1 only when neither input is 0
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
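# Illustrative composition (the helper below is hypothetical, not part of the
# original snippet): a NAND gate is just the negation of and_gate.
#   nand_gate = lambda a, b: int(not and_gate(a, b))
#   nand_gate(1, 1)  # 0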
| 633 |
"""simple docstring"""
def factorial(num: int) -> int:
    """Compute num! by repeated multiplication."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Sum the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nat_num = factorial(num)
    result = split_and_add(nat_num)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
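# Worked check: factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27. For the default num=100 the digit sum is 648.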
| 355 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 113 | 0 |
def catalan(number: int) -> int:
    """Return the nth Catalan number via the recurrence C(n) = C(n-1) * (4n - 2) // (n + 1)."""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)

    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
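# Worked check: starting from 1, the loop produces 1 -> 2 -> 5 -> 14 for
# i = 1..4, so catalan(5) == 14 (the Catalan number C_4).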
| 344 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
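# Illustrative invocation (script name and all paths are placeholders):
#   python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt --config_path ./config.json \
#       --pytorch_dump_folder_path ./unispeech-hf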
| 344 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """simple docstring"""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
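# A minimal usage sketch (the checkpoint name is taken from the map above):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界")["input_ids"]
#   tokenizer.decode(ids)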
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 394 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: PILImageResampling = PIL.Image.BILINEAR , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: Union[str, Any] , ):
'''simple docstring'''
UpperCamelCase__: int = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCamelCase__: int = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_flip_channel_order: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
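
# Minimal usage sketch for the processor above (kept as comments so the module
# stays import-safe). The image path is a hypothetical placeholder.
#
#   from PIL import Image
#   processor = _a()
#   inputs = processor(images=Image.open("example.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 256, 256])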
'''simple docstring'''
import functools
def a_ ( days ,costs ) -> int:
    # Validation
    if not isinstance(days ,list ) or not all(isinstance(day ,int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost ,int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
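
    # Hypothetical quick check (classic "minimum cost for tickets" example):
    # a 7-day pass on day 1 plus single-day tickets on days 8 and 20 costs 11.
    print(a_([1, 4, 6, 7, 8, 20] ,[2, 7, 15] ))  # -> 11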
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''

    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )

    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self , x: Tensor ):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized( self ):
        """simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    '''simple docstring'''

    src : nn.Module
    dest : nn.Module
    verbose : int = 1
    src_skip : list = field(default_factory=list )
    dest_skip : list = field(default_factory=list )
    raise_if_mismatch : bool = True

    def __call__( self , x: Tensor ):
        """simple docstring"""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(src_traced ) != len(dest_traced ) and self.raise_if_mismatch:
            raise Exception(
                F"Numbers of operations are different. Source module has {len(src_traced )} operations while"
                F" destination module has {len(dest_traced )}." )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"Transfered from={src_m} to={dest_m}" )
class FakeRegNetVisslWrapper(nn.Module ):
    '''simple docstring'''

    def __init__( self , model: nn.Module ):
        """simple docstring"""
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(("""conv1""", model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("""block""" ), F"Unexpected layer name {k}"
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((F"res{block_index}", v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )

    def forward( self , x: Tensor ):
        """simple docstring"""
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
    '''simple docstring'''

    def convert_name_to_timm( self , x: str ):
        """simple docstring"""
        x_split = x.split("""-""" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__( self , x: str ):
        """simple docstring"""
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap(dict ):
    '''simple docstring'''

    def __getitem__( self , x: str ):
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys: List[Tuple[str, str]] ) -> Dict:
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}" )
    return to_state_dict
def convert_weight_and_push( name: str , from_model_func: Callable[[], nn.Module] , our_model_func: Callable[[], nn.Module] , config: RegNetConfig , save_directory: Path , push_to_hub: bool = True , ):
    """simple docstring"""
    print(f"Converting {name}..." )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=True , )
        size = 2_2_4 if """seer""" not in name else 3_8_4
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=True , )
        print(f"Pushed {name}" )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """simple docstring"""
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str , model_func: Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="""cpu""" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["""classy_state_dict"""]["""base_model"""]["""model"""]
        state_dict = model_state_dict["""trunk"""]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = set()
# Replace all the whitespace in our sentence
__A = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowercase ) == 2_6
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = [False] * 2_6
for char in input_str:
if char.islower():
__A = True
elif char.isupper():
__A = True
return all(__lowercase )
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def _SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
from timeit import timeit
__A = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=__lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=__lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=__lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
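
    # Quick sanity checks for all three variants (hypothetical inputs):
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world" )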
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy-config.json')
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec( self ):
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )

    def test_config_from_model_shortcut( self ):
        _A = AutoConfig.from_pretrained("""bert-base-uncased""" )
        self.assertIsInstance(_A , BertConfig )

    def test_config_model_type_from_local_file( self ):
        _A = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_A , RobertaConfig )

    def test_config_model_type_from_model_identifier( self ):
        _A = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(_A , RobertaConfig )

    def test_config_for_model_str( self ):
        _A = AutoConfig.for_model("""roberta""" )
        self.assertIsInstance(_A , RobertaConfig )

    def test_pattern_matching_fallback( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir , """fake-roberta""" )
            os.makedirs(folder , exist_ok=True )
            with open(os.path.join(folder , """config.json""" ) , """w""" ) as f:
                f.write(json.dumps({} ) )
            _A = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(_A ) , RobertaConfig )
    def test_new_config_registration( self ):
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""model""" , CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""bert""" , BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config , CustomConfig )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            _A = AutoConfig.from_pretrained("""bert-base""" )

    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            _A = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )

    def test_configuration_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
            _A = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )

    def test_from_pretrained_dynamic_config( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            _A = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            _A = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
        config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir , trust_remote_code=True )
            self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
    def test_from_pretrained_dynamic_config_conflict( self ):
        class NewModelConfigLocal(BertConfig ):
            model_type = """new-model"""

        try:
            AutoConfig.register("""new-model""" , NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
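
# Sketch of the registration pattern these tests exercise (MyConfig is a
# hypothetical PretrainedConfig subclass with a unique `model_type`):
#
#   AutoConfig.register("my-model", MyConfig)
#   config = AutoConfig.from_pretrained(path_to_saved_my_config)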
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
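
        # Note: mtf_score reconstructs a sequence log-likelihood as
        # -(num_target_tokens * mean_token_cross_entropy) and compares it
        # against a previously recorded reference value.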
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=128 ,max_relative_position=32 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        '''simple docstring'''
        # NOTE: `max_relative_position` is reconstructed from the extra default in the
        # garbled signature (assumption); it is accepted but not stored by this tester.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return NezhaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        '''simple docstring'''
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,)
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    def create_and_check_for_pretraining( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    def create_and_check_for_question_answering( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': NezhaModel,
            'fill-mask': NezhaForMaskedLM,
            'question-answering': NezhaForQuestionAnswering,
            'text-classification': NezhaForSequenceClassification,
            'token-classification': NezhaForTokenClassification,
            'zero-shot': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # flag name reconstructed from the upstream Nezha test suite (assumption)
    fx_compatible = True
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=NezhaConfig ,hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,)

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict ,model_class )
            traced_model = torch.jit.trace(
                model ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model ,os.path.join(tmp ,'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp ,'bert.pt' ) ,map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) ,inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_nezha_model( self ):
        '''simple docstring'''
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )

    @slow
    def test_inference_nezha_masked_lm( self ):
        '''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 2_1128) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )

    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<pad>' )
        self.assertEqual(vocab_keys[1] ,'<unk>' )
        self.assertEqual(vocab_keys[-1] ,'[PAD]' )
        self.assertEqual(len(vocab_keys ) ,3_0001 )

    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size ,3_0000 )
    def test_do_lower_case( self ):
        '''simple docstring'''
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def test_sentencepiece_tokenize_and_convert_tokens_to_string( self ):
        '''simple docstring'''
        pass

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
    def test_sentencepiece_tokenize_and_decode( self ):
        '''simple docstring'''
        pass
    def test_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )

    def test_do_lower_case_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,do_lower_case=True ,split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,do_lower_case=True ,split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )

    def test_do_lower_case_split_by_punct_false( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,do_lower_case=True ,split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,do_lower_case=True ,split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )

    def test_do_lower_case_false_split_by_punct( self ):
        '''simple docstring'''
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,do_lower_case=False ,split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,do_lower_case=False ,split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )

    def test_do_lower_case_false_split_by_punct_false( self ):
        '''simple docstring'''
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,do_lower_case=False ,split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,do_lower_case=False ,split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(rust_tokens ,tokens_target )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence ,add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence ,add_special_tokens=False ) )
        self.assertListEqual(tokens ,rust_tokens )
        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        sequence = 'This is a test'
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB ,keep_accents=True )
        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens ,back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(rust_ids ,ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens ,tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens ,back_tokens_target )
        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens ,back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(rust_ids ,ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens ,tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens ,back_tokens_target )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = DebertaVaTokenizer(__UpperCamelCase )
lowercase_ : int = tokenizer.encode('sequence builders' )
lowercase_ : Tuple = tokenizer.encode('multi-sequence build' )
lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
lowercase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,__UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,__UpperCamelCase ,)
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[Any] = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase ,model_name='microsoft/deberta-v2-xlarge' ,revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' ,)
| 477 | 0 |
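# The tests above repeatedly assert that the "slow" (sentencepiece) and "fast"
# (Rust tokenizers) DeBERTa-v2 tokenizers agree. A self-contained sketch of the
# same parity check, assuming network access to download the vocab files:
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)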
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a list of ragged sequences to `sequence_length`; tuple padding values produce a trailing dim of 2."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation (e.g. "^", "$", "`"),
    # even though they are not in the Unicode Punctuation class.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Dynamically pads the inputs received, as well as the labels, NER tags and entity spans."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch | 557 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
# Core arguments
__UpperCamelCase : Optional[Any] = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`.")
config_args.add_argument(
"--config_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
__UpperCamelCase : Tuple = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it.")
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args) | 557 | 1 |
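# A dry run of the launcher above: with --debug it only prints the assembled
# `gcloud compute tpus tpu-vm ssh ...` command instead of executing it. The
# TPU name and zone are made-up values, and this assumes no default accelerate
# config file is picked up on the machine.
parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
tpu_command_launcher(args)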
import unittest
from transformers import DonutProcessor
CHECKPOINT = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 232 |
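import re

# Illustrative sketch (not the library implementation) of the tag-to-dict idea
# behind `token2json`: each flat <s_key>value</s_key> span becomes a dict entry.
# Nested tags like <s_nicknames> above need the full recursive parser.
def flat_token2json(sequence: str) -> dict:
    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(\w+)>(.*?)</s_\1>", sequence)}

assert flat_token2json("<s_city>Atlanta</s_city><s_state>GA</s_state>") == {
    "city": "Atlanta",
    "state": "GA",
}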
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Checks whether it is possible to add `next_ver` to the path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: tries to extend `path` at position `curr_ind`."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Wrapper that searches for a Hamiltonian cycle starting at `start_index`."""
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 232 | 1 |
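# A quick check of `hamilton_cycle` on a 5-vertex graph known to contain a
# Hamiltonian cycle (the adjacency matrix is an illustrative example):
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # e.g. [0, 1, 2, 4, 3, 0]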
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a batch of videos (a list of lists of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Dict,__A : bool = True,__A : Dict[str, int] = None,__A : PILImageResampling = PILImageResampling.BILINEAR,__A : bool = True,__A : Dict[str, int] = None,__A : bool = True,__A : Union[int, float] = 1 / 2_5_5,__A : bool = True,__A : bool = True,__A : Optional[Union[float, List[float]]] = None,__A : Optional[Union[float, List[float]]] = None,**__A : Optional[int],):
super().__init__(**__A )
_lowerCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 2_5_6}
_lowerCamelCase : int = get_size_dict(__A,default_to_square=__A )
_lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_lowerCamelCase : Tuple = get_size_dict(__A,param_name="crop_size" )
_lowerCamelCase : Tuple = do_resize
_lowerCamelCase : List[str] = size
_lowerCamelCase : List[str] = do_center_crop
_lowerCamelCase : Any = crop_size
_lowerCamelCase : int = resample
_lowerCamelCase : str = do_rescale
_lowerCamelCase : int = rescale_factor
_lowerCamelCase : Optional[int] = offset
_lowerCamelCase : Tuple = do_normalize
_lowerCamelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self : Dict,__A : np.ndarray,__A : Dict[str, int],__A : PILImageResampling = PILImageResampling.BILINEAR,__A : Optional[Union[str, ChannelDimension]] = None,**__A : Dict,):
_lowerCamelCase : Any = get_size_dict(__A,default_to_square=__A )
if "shortest_edge" in size:
_lowerCamelCase : int = get_resize_output_image_size(__A,size["shortest_edge"],default_to_square=__A )
elif "height" in size and "width" in size:
_lowerCamelCase : str = (size["height"], size["width"])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__A,size=__A,resample=__A,data_format=__A,**__A )
def lowerCamelCase_ ( self : Tuple,__A : np.ndarray,__A : Dict[str, int],__A : Optional[Union[str, ChannelDimension]] = None,**__A : List[Any],):
_lowerCamelCase : Tuple = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__A,size=(size["height"], size["width"]),data_format=__A,**__A )
def lowerCamelCase_ ( self : List[Any],__A : np.ndarray,__A : Union[int, float],__A : bool = True,__A : Optional[Union[str, ChannelDimension]] = None,**__A : Tuple,):
_lowerCamelCase : List[str] = image.astype(np.floataa )
if offset:
_lowerCamelCase : Any = image - (scale / 2)
return rescale(__A,scale=__A,data_format=__A,**__A )
def lowerCamelCase_ ( self : Optional[Any],__A : np.ndarray,__A : Union[float, List[float]],__A : Union[float, List[float]],__A : Optional[Union[str, ChannelDimension]] = None,**__A : Optional[Any],):
return normalize(__A,mean=__A,std=__A,data_format=__A,**__A )
def lowerCamelCase_ ( self : Any,__A : ImageInput,__A : bool = None,__A : Dict[str, int] = None,__A : PILImageResampling = None,__A : bool = None,__A : Dict[str, int] = None,__A : bool = None,__A : float = None,__A : bool = None,__A : bool = None,__A : Optional[Union[float, List[float]]] = None,__A : Optional[Union[float, List[float]]] = None,__A : Optional[ChannelDimension] = ChannelDimension.FIRST,):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_lowerCamelCase : Optional[int] = to_numpy_array(__A )
if do_resize:
_lowerCamelCase : Tuple = self.resize(image=__A,size=__A,resample=__A )
if do_center_crop:
_lowerCamelCase : Any = self.center_crop(__A,size=__A )
if do_rescale:
_lowerCamelCase : List[str] = self.rescale(image=__A,scale=__A,offset=__A )
if do_normalize:
_lowerCamelCase : Optional[Any] = self.normalize(image=__A,mean=__A,std=__A )
_lowerCamelCase : Dict = to_channel_dimension_format(__A,__A )
return image
def lowerCamelCase_ ( self : Dict,__A : ImageInput,__A : bool = None,__A : Dict[str, int] = None,__A : PILImageResampling = None,__A : bool = None,__A : Dict[str, int] = None,__A : bool = None,__A : float = None,__A : bool = None,__A : bool = None,__A : Optional[Union[float, List[float]]] = None,__A : Optional[Union[float, List[float]]] = None,__A : Optional[Union[str, TensorType]] = None,__A : ChannelDimension = ChannelDimension.FIRST,**__A : Optional[Any],):
_lowerCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : int = resample if resample is not None else self.resample
_lowerCamelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Union[str, Any] = offset if offset is not None else self.offset
_lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_lowerCamelCase : Any = size if size is not None else self.size
_lowerCamelCase : Any = get_size_dict(__A,default_to_square=__A )
_lowerCamelCase : str = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Optional[Any] = get_size_dict(__A,param_name="crop_size" )
if not valid_images(__A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_lowerCamelCase : str = make_batched(__A )
_lowerCamelCase : int = [
[
self._preprocess_image(
image=__A,do_resize=__A,size=__A,resample=__A,do_center_crop=__A,crop_size=__A,do_rescale=__A,rescale_factor=__A,offset=__A,do_normalize=__A,image_mean=__A,image_std=__A,data_format=__A,)
for img in video
]
for video in videos
]
_lowerCamelCase : Any = {"pixel_values": videos}
return BatchFeature(data=__A,tensor_type=__A ) | 44 |
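# A rough sketch of running such a video processor over a dummy 8-frame clip;
# this uses the `VivitImageProcessor` shipped with transformers (the class
# above corresponds to it), and the frame sizes are arbitrary.
import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor()
video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)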
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Optional[Any] = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 142 | 0 |
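# The `_LazyModule` indirection above keeps `import transformers` cheap: heavy
# submodules only load on first attribute access. An illustrative sketch:
#
#   from transformers.models import funnel   # fast, nothing heavy imported yet
#   model_cls = funnel.FunnelModel            # now modeling_funnel (and torch) load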
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launches a training function, using several processes if possible in the current environment."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 705 |
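# Typical notebook usage of `notebook_launcher` above, assuming a machine with
# at least two GPUs; the training function body is a placeholder.
def training_function():
    print("hello from one process")

notebook_launcher(training_function, args=(), num_processes=2)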
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content=None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
_lowercase = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 44 | 0 |
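# A small round-trip sketch for the front-matter splitting above:
readme = "---\npretty_name: Demo\n---\n# My dataset\n"
yaml_block, body = _split_yaml_from_readme(readme)
assert yaml_block == "pretty_name: Demo"
assert body == "# My dataset"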
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Calculate the speed of sound in a fluid from its density and bulk modulus."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod() | 46 |
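# Worked example: water has a density of roughly 1000 kg/m^3 and a bulk
# modulus of about 2.15 GPa, giving sqrt(2.15e9 / 1000) ≈ 1466 m/s.
print(speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.15e9))  # ~1466.29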
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 101 | 0 |
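# `make_linear_from_emb` above is weight tying: the LM head reuses the input
# embedding matrix. A self-contained sketch of the same idea:
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)  # weight shape (10, 4)
lm_head.weight.data = emb.weight.data
assert torch.equal(lm_head.weight, emb.weight)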
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=13 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Optional[int]=[10, 20, 30, 40] , UpperCAmelCase__ : int=[2, 2, 3, 2] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : int=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[Any]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : str=[2, 3, 4] , UpperCAmelCase__ : Optional[Any]=None , ):
'''simple docstring'''
lowercase : Dict =parent
lowercase : List[Any] =batch_size
lowercase : str =image_size
lowercase : Optional[Any] =num_channels
lowercase : Optional[Any] =num_stages
lowercase : str =hidden_sizes
lowercase : Optional[Any] =depths
lowercase : Tuple =is_training
lowercase : str =use_labels
lowercase : str =intermediate_size
lowercase : List[Any] =hidden_act
lowercase : Dict =num_labels
lowercase : Optional[int] =initializer_range
lowercase : Union[str, Any] =out_features
lowercase : Any =out_indices
lowercase : Dict =scope
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Dict =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Union[str, Any] =ConvNextVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowercase : List[str] =model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowercase : Dict =model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Optional[Any] =ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowercase : Union[str, Any] =model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : Optional[Any] =None
lowercase : List[str] =ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowercase : Optional[Any] =model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
lowercase : Optional[Any] =config_and_inputs
lowercase : Optional[int] ={"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase : Tuple =config_and_inputs
lowercase : Optional[int] ={"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
lowerCamelCase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =ConvNextVaModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : int =True
if model_class.__name__ in [
*get_values(_UpperCamelCase ),
*get_values(_UpperCamelCase ),
]:
continue
lowercase : Dict =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
lowercase : int =self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
lowercase : List[str] =model(**_UpperCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : Union[str, Any] =False
lowercase : int =True
if (
model_class.__name__
in [*get_values(_UpperCamelCase ), *get_values(_UpperCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : List[Any] =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase : str =self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
lowercase : Optional[Any] =model(**_UpperCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str =model_class(_UpperCamelCase )
lowercase : Any =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Union[str, Any] =[*signature.parameters.keys()]
lowercase : List[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict ):
lowercase : Optional[Any] =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
lowercase : Optional[int] =model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
lowercase : str =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : List[Any] =self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict =True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Optional[int] =True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple =ConvNextVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _lowerCAmelCase ( ) -> Any:
lowercase : str =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_UpperCamelCase )
lowercase : int =self.default_image_processor
lowercase : List[Any] =prepare_img()
lowercase : Optional[int] =preprocessor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase : Optional[Any] =model(**_UpperCamelCase )
# verify the logits
lowercase : str =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
lowercase : str =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
| 715 |
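# A condensed version of the integration check above, runnable on its own
# (downloads the facebook/convnextv2-tiny-1k-224 weights; the blank image is a
# stand-in for the COCO fixture):
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image_processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.new("RGB", (640, 480))
with torch.no_grad():
    logits = model(**image_processor(images=image, return_tensors="pt")).logits
print(logits.shape)  # torch.Size([1, 1000])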
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast Polynomial Multiplication using radix-2 fast Fourier Transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); Shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
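# Multiplying (1 + 2x + 3x^2) by (4 + 5x) with the class above; the expected
# product is 4 + 13x + 22x^2 + 15x^3.
fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
print(fft.product)  # [(4+0j), (13+0j), (22+0j), (15+0j)] up to rounding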
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__magic_name__ : str = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class __snake_case (unittest.TestCase ):
def __a ( self: Any , A_: str , A_: bool , A_: str = None , A_: list = None ):
__lowerCamelCase = None
__lowerCamelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__lowerCamelCase = os.path.abspath("""examples""" )
for item in os.listdir(A_ ):
if item not in EXCLUDE_EXAMPLES:
__lowerCamelCase = os.path.join(A_ , A_ )
if os.path.isfile(A_ ) and ".py" in item_path:
with self.subTest(
tested_script=A_ , feature_script=A_ , tested_section="""main()""" if parser_only else """training_function()""" , ):
__lowerCamelCase = compare_against_test(
os.path.join(A_ , A_ ) , A_ , A_ , A_ )
__lowerCamelCase = """\n""".join(A_ )
if special_strings is not None:
for string in special_strings:
__lowerCamelCase = diff.replace(A_ , """""" )
self.assertEqual(A_ , """""" )
def __a ( self: str ):
self.one_complete_example("""complete_nlp_example.py""" , A_ )
self.one_complete_example("""complete_nlp_example.py""" , A_ )
def __a ( self: Dict ):
__lowerCamelCase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__lowerCamelCase = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , A_ , A_ , A_ )
self.one_complete_example("""complete_cv_example.py""" , A_ , A_ , A_ )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class __snake_case (lowerCamelCase ):
__a = False
@classmethod
def __a ( cls: List[str] ):
super().setUpClass()
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__lowerCamelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __a ( cls: Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __a ( self: List[str] ):
__lowerCamelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def __a ( self: Dict ):
__lowerCamelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def __a ( self: int ):
__lowerCamelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs , return_stdout=A_ )
self.assertNotIn("""epoch 0:""" , A_ )
self.assertIn("""epoch 1:""" , A_ )
def __a ( self: Tuple ):
__lowerCamelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs , return_stdout=A_ )
if torch.cuda.is_available():
__lowerCamelCase = torch.cuda.device_count()
else:
__lowerCamelCase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , A_ )
self.assertIn("""epoch 1:""" , A_ )
else:
self.assertIn("""epoch 0:""" , A_ )
self.assertIn("""epoch 1:""" , A_ )
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
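    # Each test above shells out via `accelerate launch`. A rough hand-run equivalent of
    # the first checkpointing check would be the following (a sketch; the output path is
    # an illustrative assumption, the config file is the one written in setUpClass):
    #
    #   accelerate launch --config_file default_config.yml \
    #       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir /tmp/ckpts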
| 281 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 281 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
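# The slicing above is the standard way a fused qkv projection is split into separate
# query/key/value tensors. A minimal, self-contained sketch of the same idea on a toy
# tensor (the shapes are illustrative assumptions, not DETA's real dimensions):
#
#   import torch
#   dim = 4
#   qkv_weight = torch.randn(3 * dim, dim)   # fused projection: rows are [q; k; v]
#   q = qkv_weight[:dim, :]
#   k = qkv_weight[dim : dim * 2, :]
#   v = qkv_weight[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv_weight)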
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCamelCase_ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCamelCase_ ) , atol=1e-4 )
print("Everything ok!" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
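# Example invocation from a shell (a sketch; it assumes this file is saved as
# convert_deta_swin_to_pytorch.py and that the dump folder is writable):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large --pytorch_dump_folder_path ./deta-swin-large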
| 112 |
"""simple docstring"""
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
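# A few illustrative checks of the stack-based matcher above (these inputs are
# assumptions for demonstration, not part of the original script):
#
#   is_balanced("{[()]}")   # -> True: every opener is closed in LIFO order
#   is_balanced("{[(])}")   # -> False: "]" tries to close "(" out of order
#   is_balanced("((")       # -> False: the stack is non-empty at the end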
| 112 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
def __len__( self : Optional[int] ):
return self.config.num_train_timesteps
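# A minimal sampling-loop sketch for the scheduler above (the `model` producing the
# score network output and all shapes are assumptions for illustration):
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = model(x, t)                        # hypothetical score network
#       x, x_mean = scheduler.step_pred(score, x, t)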
| 49 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowercase__ = replace_with_bnb_layers(
lowercase__ , lowercase__ , modules_to_not_convert=lowercase__ )
lowercase__ = get_quantized_model_device_map(
lowercase__ , lowercase__ , lowercase__ , max_memory=lowercase__ , no_split_module_classes=lowercase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase__ = True
lowercase__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowercase__ , lowercase__ , lowercase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase__ , offload_state_dict=lowercase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowercase__ , device_map=lowercase__ , offload_dir=lowercase__ )
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside our model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
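# A minimal end-to-end sketch of how these helpers are typically used (the model name
# and 8-bit settings below are illustrative assumptions):
#
#   from accelerate import init_empty_weights
#   from transformers import AutoConfig, AutoModelForCausalLM
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )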
| 325 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.'
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
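# A small usage sketch for the config above (the overrides shown are illustrative
# assumptions):
#
#   config = DPTConfig(is_hybrid=True)   # builds the default BiT backbone described above
#   as_dict = config.to_dict()           # the nested backbone config is serialized recursively
#   large = DPTConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)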
| 715 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
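# Note that the two-pointer scan above assumes `nums` is sorted in ascending order;
# on unsorted input it can miss valid pairs. A couple of illustrative calls (these
# inputs are assumptions, not part of the original script):
#
#   two_pointer([2, 7, 11, 15], 9)    # -> [0, 1], since 2 + 7 == 9
#   two_pointer([2, 7, 11, 15], 100)  # -> [], no pair sums to the target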
| 144 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 11 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCamelCase : List[str] = """\
"""
_lowerCamelCase : Optional[int] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_lowerCamelCase : List[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
SCREAMING_SNAKE_CASE : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(snake_case ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(snake_case )} | 352 | 0 |
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2,  # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 624 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 428 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
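# A quick usage sketch (the environment variable names below are illustrative
# assumptions):
#
#   os.environ["MY_DEBUG"] = "1"
#   parse_flag_from_env("MY_DEBUG")                        # -> True
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)    # -> first positive value found, else 1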
| 428 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def snake_case ( snake_case__ :str , snake_case__ :str) -> List[Any]:
_A = len([g for position, g in enumerate(snake_case__) if g == main_target[position]])
return (item, float(snake_case__))
def snake_case ( snake_case__ :str , snake_case__ :str) -> Union[str, Any]:
_A = random.randint(0 , len(snake_case__) - 1)
_A = parent_a[:random_slice] + parent_a[random_slice:]
_A = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case ( snake_case__ :str , snake_case__ :list[str]) -> Tuple:
_A = list(snake_case__)
if random.uniform(0 , 1) < MUTATION_PROBABILITY:
_A = random.choice(snake_case__)
return "".join(snake_case__)
def snake_case ( snake_case__ :tuple[str, float] , snake_case__ :list[tuple[str, float]] , snake_case__ :list[str] , ) -> Optional[int]:
_A = []
# Generate more children proportionally to the fitness score.
_A = int(parent_a[1] * 100) + 1
_A = 10 if child_n >= 10 else child_n
for _ in range(snake_case__):
_A = population_score[random.randint(0 , snake_case__)][0]
_A = crossover(parent_a[0] , snake_case__)
# Append new string to the population list.
pop.append(mutate(snake_case__ , snake_case__))
pop.append(mutate(snake_case__ , snake_case__))
return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reached."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
_A = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''')
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 717 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    """CLI command that collects and prints environment info for bug reports."""

    @staticmethod
    def register_subcommand(parser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self) -> dict:
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 83 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_multiple_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_multiple_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
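# Usage sketch (added for illustration; the parameter group and step counts are
# made up): build a warmup + linear-decay schedule for any torch optimizer.
#
#   import torch
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   optimizer = torch.optim.AdamW(params, lr=1e-4)
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       optimizer.step()
#       scheduler.step()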
| 64 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
    parser.add_argument('--model_name', default='roberta-large', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = 'transformer'

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase : Union[str, Any] = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase : Any = F'{prefix}.embeddings.{w}.weight'
lowerCAmelCase : Optional[int] = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase : Optional[Any] = F'{prefix}.embeddings.LayerNorm.{w}'
lowerCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase : List[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase : List[str] = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowerCAmelCase : Union[str, Any] = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase : Union[str, Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[F'{layer}'] = state_dict[F'{layer}']
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[F'lm_head.dense.{w}'] = state_dict[F'lm_head.dense.{w}']
                compressed_sd[F'lm_head.layer_norm.{w}'] = state_dict[F'lm_head.layer_norm.{w}']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[F'{prefix}.ln_f.{w}'] = state_dict[F'{prefix}.ln_f.{w}']
        compressed_sd['lm_head.weight'] = state_dict['lm_head.weight']

    print(F'N layers selected for distillation: {std_idx}')
    print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
    print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
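# Example invocation (added for illustration; the checkpoint path is a placeholder):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/student_init.pth --vocab_transform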
| 511 | 0 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
'''simple docstring'''
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""")


if __name__ == "__main__":
    main()
| 501 |
'''simple docstring'''
def number_of_set_bits(number: int) -> int:
    """
    Count the set bits ('1's) in the binary representation of a positive integer.

    >>> number_of_set_bits(25)
    3
    """
    if not isinstance(number, int):
        raise TypeError('''Input value must be a \'int\' type''')
    if number < 0:
        raise ValueError('''Input value must be a positive integer''')
    return bin(number).count('''1''')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 501 | 1 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram stretching (equalization) for a grayscale image."""

    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='''x''')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('''output_data/output.jpg''', self.img)
    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow('''Output-Image''', self.img)
        cv2.imshow('''Input-Image''', self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
__lowercase : List[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowercase : List[str] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 66 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
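# Behavior sketch (added for illustration): a dummy object stands in for the
# real class when its optional backend is missing, so instantiating it raises
# an ImportError telling the user to install `note_seq` instead of failing
# with an opaque ModuleNotFoundError at import time.
#   MidiProcessor()  # raises ImportError mentioning the note_seq requirement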
| 521 |
from manim import *
class UpperCamelCase__(Scene):
    def construct(self):
__a : List[Any] = Rectangle(height=0.5 , width=0.5 )
__a : Tuple = Rectangle(height=0.25 , width=0.25 )
__a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a : List[str] = [mem.copy() for i in range(6 )]
__a : Tuple = [mem.copy() for i in range(6 )]
__a : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : int = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
__a : int = Text('''CPU''' , font_size=2_4 )
__a : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
__a : Optional[Any] = [mem.copy() for i in range(4 )]
__a : str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Tuple = Text('''GPU''' , font_size=2_4 )
__a : List[str] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
__a : Union[str, Any] = [mem.copy() for i in range(6 )]
__a : int = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = Text('''Model''' , font_size=2_4 )
__a : Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
__a : List[Any] = []
__a : str = []
__a : Optional[int] = []
for i, rect in enumerate(snake_case_ ):
rect.set_stroke(snake_case_ )
__a : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case_ , buff=0.0 )
self.add(snake_case_ )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ , *snake_case_ )
__a : str = [mem.copy() for i in range(6 )]
__a : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = Text('''Loaded Checkpoint''' , font_size=2_4 )
__a : Any = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case_ )
__a : Tuple = []
__a : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
__a : Optional[int] = fill.copy().set_fill(snake_case_ , opacity=0.7 )
target.move_to(snake_case_ )
ckpt_arr.append(snake_case_ )
__a : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
__a : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
__a : Any = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
__a : int = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__a : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
__a : List[Any] = [meta_mem.copy() for i in range(6 )]
__a : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Tuple = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[Any] = Text('''Disk''' , font_size=2_4 )
__a : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case_ , run_time=3 ) , Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
__a : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
__a : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(FadeOut(snake_case_ ) )
__a : Optional[int] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
self.play(
FadeOut(snake_case_ , snake_case_ , *snake_case_ , *snake_case_ ) , )
self.wait()
| 521 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
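# Worked example (added for illustration): 5 shards over 3 jobs puts the
# remainder on the first groups.
#   _distribute_shards(num_shards=5, max_num_jobs=3)
#   -> [range(0, 2), range(2, 4), range(4, 5)]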
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Shuffle the indices once per distinct list size so lists of equal length stay aligned
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
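# Usage sketch (added for illustration; the file names are placeholders):
# splitting a generator's kwargs across two workers keeps non-list values shared.
#   gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "encoding": "utf-8"}
#   _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
#   -> [{"files": ["a.txt", "b.txt"], "encoding": "utf-8"},
#       {"files": ["c.txt"], "encoding": "utf-8"}]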
| 107 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1_024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on one GPU/process and dump its predictions to a json file."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''', rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(F'rank_{local_rank}_output.json')
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}')  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1_024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **(dataset_kwargs or {}),
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device),
            attention_mask=batch['''attention_mask'''].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'''pred''': pred, '''id''': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate'''
    )
    parser.add_argument('''--data_dir''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument(
        '''--model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''', default='''sshleifer/distilbart-xsum-12-3''',
    )
    parser.add_argument('''--save_dir''', type=str, help='''where to save''', default='''tmp_gen''')
    parser.add_argument('''--max_source_length''', type=int, default=None)
    parser.add_argument(
        '''--type_path''', type=str, default='''test''', help='''which subset to evaluate typically train/val/test''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--local_rank''', type=int, default=-1, required=False, help='''should be passed by distributed.launch''')
    parser.add_argument(
        '''--n_obs''', type=int, default=None, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument(
        '''--num_return_sequences''', type=int, default=1, required=False, help='''How many sequences to return''')
    parser.add_argument(
        '''--sync_timeout''', type=int, default=600, required=False, help='''How long should master process wait for other processes to finish.''', )
    parser.add_argument('''--src_lang''', type=str, default=None, required=False)
    parser.add_argument('''--tgt_lang''', type=str, default=None, required=False)
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--debug''', action='''store_true''')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(F'parsed the following generate kwargs: {generate_kwargs}')
    json_save_dir = Path(args.save_dir + '''_tmp''')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json'''))
    if intermediate_files:
        raise ValueError(F'Found files at {json_save_dir} please move or remove them.')
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['''src_lang'''] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['''tgt_lang'''] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''')
            print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/')
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '''.target''')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds, labels)
        metrics['''n_obs'''] = len(preds)
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''], 4)
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'{args.type_path}_{metric_name}.json')
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(F'{args.type_path}_generations.txt'))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(F'{args.type_path}.target'))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank results into one list, then sort it by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
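# Illustrative sketch (the values are made up): rank outputs are merged and
# re-ordered by example id before scoring.
#   combine_partial_results([[{"pred": "b", "id": 1}], [{"pred": "a", "id": 0}]])
#   -> ["a", "b"]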
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json'''))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 107 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__a : int = None
__a : str = logging.get_logger(__name__)
__a : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__a : Any = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
__a : Tuple = {
'camembert-base': 5_1_2,
}
__a : List[Any] = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Fast CamemBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
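# Usage sketch (added for illustration; downloading "camembert-base" requires
# network access):
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]  # -> list of token ids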
| 606 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=1_3, image_size=3_2, patch_size=2, num_channels=3, embed_dim=1_6, hidden_sizes=[3_2, 6_4, 1_2_8], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.0_2, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=1_0, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=3_7, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
    def test_feed_forward_chunking(self):
pass
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_: List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: List[Any] = model_class(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: Any = [*signature.parameters.keys()]
UpperCamelCase_: List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_: Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_: Union[str, Any] = outputs.hidden_states
UpperCamelCase_: Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# FocalNet has a different seq_length
UpperCamelCase_: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase_: Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: int = reshaped_hidden_states[0].shape
UpperCamelCase_: List[str] = (
reshaped_hidden_states[0].view(_lowerCamelCase , _lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: str = 3
UpperCamelCase_: Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase_: int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase_: str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Dict = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@slow
def _a ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: List[Any] = FocalNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: Dict = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
UpperCamelCase_: List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def _a ( self ):
UpperCamelCase_: Optional[int] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = self.default_image_processor
UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase_: str = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_: List[str] = model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_: Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_: Optional[int] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =(FocalNetBackbone,) if is_torch_available() else ()
a : List[str] =FocalNetConfig
a : List[str] =False
def _a ( self ):
UpperCamelCase_: Any = FocalNetModelTester(self ) | 57 | 0 |
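The hidden-state assertions in the FocalNet tests above all hinge on the patch count num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]). A worked sketch of that arithmetic with purely illustrative sizes (the actual values come from the model tester's config):

# Hypothetical sizes, chosen only to illustrate the formula used in the tests.
image_size = (224, 224)
patch_size = (4, 4)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
print(num_patches)  # 3136 == 56 * 56, the sequence length of the first hidden state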
from scipy.stats import spearmanr
import datasets
lowercase : Any = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
lowercase : Optional[int] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
lowercase : int = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr ( datasets.Metric ):
"""simple docstring"""
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 718 |
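The metric above delegates to scipy.stats.spearmanr, the rank-based analogue of Pearson correlation. A minimal sketch of the underlying call, reproducing the numbers from the metric's own docstring examples (assumes only that scipy is installed):

from scipy.stats import spearmanr

# Spearman's rho is the Pearson correlation computed on the ranks of the data.
references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]
rho, pvalue = spearmanr(references, predictions)
print(rho)               # -0.7, matching the docstring example
print(round(pvalue, 2))  # 0.19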
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 423 | 0 |
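The SegFormer configuration above describes a four-stage hierarchical encoder plus an all-MLP decode head. A short sketch of instantiating it with the defaults from the __init__ signature (assumes the transformers package is installed):

from transformers import SegformerConfig

# Defaults mirror the signature above: 4 encoder blocks, hidden sizes
# [32, 64, 160, 256], and a 256-dim decoder head.
config = SegformerConfig()
print(config.num_encoder_blocks)   # 4
print(config.hidden_sizes)         # [32, 64, 160, 256]
print(config.decoder_hidden_size)  # 256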
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
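The MMBT __init__ above uses the transformers lazy-import pattern: _import_structure maps each submodule to the symbols it exports, and _LazyModule defers the actual imports until a symbol is first accessed. A simplified, hypothetical sketch of that mechanism (not the real _LazyModule implementation):

import importlib
from types import ModuleType

class LazyModuleSketch(ModuleType):
    """Toy stand-in for _LazyModule: imports submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {symbol}")
        # The import cost is paid here, on first access, not at package import time.
        module = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[symbol]}")
        return getattr(module, symbol)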
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    scaler = MinMaxScaler()
    actual_data = scaler.fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    result = model.predict(x_test)
| 77 | 0 |
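The script predicts in the scaled [0, 1] space, so forecasts must be mapped back through the fitted scaler to read them as prices. A small sketch (assumes the scaler and result variables from the script above):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

def to_original_scale(scaler: MinMaxScaler, result: np.ndarray) -> np.ndarray:
    # inverse_transform expects a 2-D array with a single feature column.
    flat = result.reshape(-1, 1)
    return scaler.inverse_transform(flat).reshape(result.shape)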
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf_name, hf_name) substitution pairs, in order, to a checkpoint key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores dense kernels transposed relative to torch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 704 |
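rename_state_dict_key applies the (tf_name, hf_name) pairs strictly in list order, so early substitutions feed later ones. A small trace with a hypothetical TF key shaped like the ones the script expects:

tf_key = "pegasus/decoder/layer_0/attention/self/query/kernel"  # assumed TF naming
# "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight", "pegasus" -> "model",
# then "attention.self" -> "self_attn" and "query" -> "q_proj":
hf_key = rename_state_dict_key(tf_key, DECODER_PATTERNS)
print(hf_key)  # model.decoder.layers.0.self_attn.q_proj.weight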
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        input_string = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(input_string, normalized_text)
| 294 | 0 |
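In the setUp fixture above the vocabulary file holds one token per line, so a token's id is simply its line index. A minimal sketch of that mapping in plain Python (no tokenizer dependency):

vocab_tokens = ["<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t"]
token_to_id = {tok: i for i, tok in enumerate(vocab_tokens)}
unk_id = token_to_id["<unk>"]
print([token_to_id.get(ch, unk_id) for ch in "我是CPMAnt"])  # [8, 9, 10, 11, 12, 13, 14, 15]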