| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
a :Optional[int] = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
a :str = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_key.replace(__lowerCAmelCase , __lowerCAmelCase )
print(F'''{key} -> {new_key}''' )
SCREAMING_SNAKE_CASE__ : List[str] = s_dict.pop(__lowerCAmelCase )
return s_dict
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = emb.weight.shape
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = emb.weight.data
return lin_layer
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bytes:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = os.path.basename(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = url.split("""/""" )[-2]
SCREAMING_SNAKE_CASE__ : str = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not os.path.isfile(__lowerCAmelCase ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = open(__lowerCAmelCase , """rb""" ).read()
if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(__lowerCAmelCase ) as source, open(__lowerCAmelCase , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__lowerCAmelCase , unit_divisor=1024 ) as loop:
while True:
SCREAMING_SNAKE_CASE__ : Dict = source.read(8192 )
if not buffer:
break
output.write(__lowerCAmelCase )
loop.update(len(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = open(__lowerCAmelCase , """rb""" ).read()
if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
if ".pt" not in checkpoint_path:
SCREAMING_SNAKE_CASE__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
SCREAMING_SNAKE_CASE__ : str = torch.load(__lowerCAmelCase , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Any = original_checkpoint['''dims''']
SCREAMING_SNAKE_CASE__ : int = original_checkpoint['''model_state_dict''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(__lowerCAmelCase )
rename_keys(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Tuple = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
SCREAMING_SNAKE_CASE__ : int = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__lowerCAmelCase , decoder_ffn_dim=__lowerCAmelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
SCREAMING_SNAKE_CASE__ : List[Any] = WhisperForConditionalGeneration(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
SCREAMING_SNAKE_CASE__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE__ : Dict = proj_out_weights
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
a :List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
a :Any = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
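# Usage sketch (illustrative, not part of the original script): convert the
# "tiny" checkpoint by model name and reload the result with the standard
# Transformers API. The output directory name is a placeholder; the download
# path fetches roughly 75 MB for "tiny".
convert_openai_whisper_to_tfms("tiny", "whisper-tiny-hf")
model = WhisperForConditionalGeneration.from_pretrained("whisper-tiny-hf")
print(model.config.d_model)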
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
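# The module above follows the `_LazyModule` pattern used across Transformers:
# framework-specific submodules are only imported when one of their attributes
# is first accessed. A minimal standalone sketch of the idea follows; it is a
# hypothetical simplification for illustration, not the Transformers
# implementation.
import importlib
import types


class LazySubmoduleLoader(types.ModuleType):
    """Defer importing submodules until one of their attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per attribute
        return value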
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
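# Beyond the doctests, the two error paths can be shown directly
# (values chosen purely for the example):
try:
    check_polygon([5])  # fewer than three sides cannot close a polygon
except ValueError as err:
    print(err)  # Monogons and Digons are not polygons in the Euclidean space

try:
    check_polygon([-2, 4, 6])  # non-positive side lengths are rejected
except ValueError as err:
    print(err)  # All values must be greater than 0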
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
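# Both cross-framework tests above follow the same recipe: build one model per
# framework, port the weights with the conversion helpers imported at the top,
# and compare outputs numerically. A condensed sketch of that recipe outside
# the test harness; the tiny config and the tolerance are illustrative choices.
import numpy as np
import torch
import jax.numpy as jnp

from transformers import GPTJConfig, GPTJForCausalLM
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM

config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
pt_model = GPTJForCausalLM(config).eval()
fx_model = FlaxGPTJForCausalLM(config, dtype=jnp.float32)
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

input_ids = np.ones((1, 7), dtype="i4")
with torch.no_grad():
    pt_logits = pt_model(torch.tensor(input_ids, dtype=torch.long)).logits.numpy()
fx_logits = fx_model(input_ids).logits

print(np.max(np.abs(pt_logits - fx_logits)))  # should be small, e.g. < 4e-2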
import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
            "attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
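# The full-tokenizer test above exercises a character-level SentencePiece
# model: every word is split into single characters, with SPIECE_UNDERLINE
# ("▁") marking word boundaries. A standalone sketch of that splitting scheme
# (a simplification for illustration, not the real SentencePiece algorithm):
SPIECE_UNDERLINE = "▁"


def char_tokenize(text: str) -> list[str]:
    """Split text into characters, inserting a word-boundary marker before each word."""
    tokens = []
    for word in text.split(" "):
        tokens.append(SPIECE_UNDERLINE)
        tokens.extend(word)
    return tokens


print(char_tokenize("This is a test"))
# ['▁', 'T', 'h', 'i', 's', '▁', 'i', 's', '▁', 'a', '▁', 't', 'e', 's', 't']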
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
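# The equivalence test above compares the two ways of loading the same
# backbone. As a standalone sketch (checkpoint name taken from the test;
# requires timm to be installed; output shapes depend on the input size):
import torch
from transformers import AutoBackbone

# Load a resnet18 backbone through timm and ask for intermediate feature maps.
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])

pixel_values = torch.rand(1, 3, 224, 224)
outputs = backbone(pixel_values)
for channels, feature_map in zip(backbone.channels, outputs.feature_maps):
    print(channels, feature_map.shape)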
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
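# A short sketch of how these aliases might annotate a function; the function
# itself is hypothetical and only illustrates the intended usage.
def save_texts(texts: NestedDataStructureLike[str], path: PathLike) -> None:
    """Accept a single string, a list of strings, or a dict of strings, and write them out."""
    if isinstance(texts, str):
        texts = [texts]
    values = texts.values() if isinstance(texts, dict) else texts
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(values))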
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
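# A standalone illustration of the layouts the three sequence helpers build.
# The cls/sep ids (65/66) and token ids are placeholder values for the
# example, not BigBird's real vocabulary ids.
cls, sep = [65], [66]
ids_a, ids_b = [5, 6, 7], [8, 9]

pair = cls + ids_a + sep + ids_b + sep  # build_inputs_with_special_tokens
special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]  # get_special_tokens_mask
token_type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]  # create_token_type_ids_from_sequences

print(pair)            # [65, 5, 6, 7, 66, 8, 9, 66]
print(special_mask)    # [1, 0, 0, 0, 1, 0, 0, 1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]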
def solution(n: int = 10) -> str:
    """
    Returns the last `n` digits of 28433 * 2^7830457 + 1 (Project Euler problem 97).

    >>> solution()
    '8739992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
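# The key step above is Python's three-argument pow, which performs fast
# modular exponentiation (square-and-multiply) and never materializes the
# astronomically large 2**7830457. A quick check of the equivalence on small
# numbers:
assert pow(2, 20, 10**5) == (2**20) % 10**5 == 48576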
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]:
'''simple docstring'''
def is_in_circle(__lowerCAmelCase , __lowerCAmelCase ) -> bool:
UpperCAmelCase : List[Any] =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase : List[Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__lowerCAmelCase ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase : Dict =proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , )-> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(__lowerCAmelCase , __lowerCAmelCase ) ) for _ in range(__lowerCAmelCase ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    '''simple docstring'''
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    '''simple docstring'''
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0 )
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
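# Illustrative usage (assumed entry points, mirroring the functions above):
#   pi_estimator(100_000)                         # Monte Carlo darts in the unit square
#   area_under_line_estimator_check(100_000)      # integral of y=x on [0, 1] -> ~0.5
#   pi_estimator_using_area_under_curve(100_000)  # integral of sqrt(4-x^2) on [0, 2] -> ~pi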
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)
        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
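# Hypothetical follow-up (assumed, not in the original benchmark): the JSON
# written above can be reloaded to compare timings across runs, e.g.
#   with open(RESULTS_FILE_PATH) as f:
#       times = json.load(f)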
| 195 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask) -> Dict:
        '''simple docstring'''
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self) -> Optional[Any]:
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self) -> Optional[int]:
        '''simple docstring'''
        pass

    def test_training_gradient_checkpointing(self) -> Union[str, Any]:
        '''simple docstring'''
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self) -> str:
        '''simple docstring'''
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self) -> str:
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained(self) -> Tuple:
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True) -> Any:
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
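# Assumed invocation (standard unittest/pytest discovery; path is illustrative):
#   python -m pytest -k "TFBlipTextModelTest" tests/models/blip/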
| 348 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
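# Example CLI usage via fire (illustrative; script name and flags are assumptions):
#   python save_random_model.py t5-small ./t5-small-random --d_model 64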
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 163 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    '''simple docstring'''
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig) -> Dict:
        '''simple docstring'''
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images) -> Tuple:
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor) -> Tuple:
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
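# Minimal usage sketch (assumption; `pixel_values` and `np_images` are placeholders):
#   checker = StableDiffusionSafetyChecker.from_pretrained(
#       'CompVis/stable-diffusion-safety-checker')
#   np_images, has_nsfw = checker(clip_input=pixel_values, images=np_images)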
| 348 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    '''simple docstring'''

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    '''simple docstring'''
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    '''simple docstring'''
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None, ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}',
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start )
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}')
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 15 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
| 348 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: str = logging.get_logger(__name__)
_lowercase: str = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "megatron-bert"
    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
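# Usage sketch (assumed): the defaults above mirror the Megatron-BERT-345M
# architecture (24 layers, hidden size 1024, 16 attention heads).
#   config = MegatronBertConfig()
#   assert config.num_hidden_layers == 24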
| 227 |
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id: str) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id, str):
        msg = f'Expected string as input, found {type(spanish_id).__name__}'
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
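# Worked example (the check letter is number mod 23 indexed into LOOKUP_LETTERS):
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so '12345678Z' validates.
#   assert is_spain_national_id('12345678Z')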
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 205 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
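# Worked examples (consistent with the functions above):
#   logical_left_shift(1, 1)       -> '0b10'    (1 << 1 == 2)
#   logical_right_shift(8, 2)      -> '0b10'    (8 >> 2 == 2)
#   arithmetic_right_shift(-8, 2)  -> '0b11110' (-8 >> 2 == -2 in two's complement)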
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    '''simple docstring'''
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> Union[str, Any]:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("""gpt2""")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs) -> List[str]:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences) -> int:
        """simple docstring"""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, """char""")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, """bpe""")
        wp_strs, wp_scores = self._decode_helper(wp_preds, """wp""")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format) -> Any:
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f'''Format {format} is not supported.''')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences) -> Union[str, Any]:
        """simple docstring"""
        decode_strs = [seq.replace(""" """, """""") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences) -> List[str]:
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences) -> Optional[Any]:
        """simple docstring"""
        decode_strs = [seq.replace(""" """, """""") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
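# Usage sketch (checkpoint name and `outputs` are assumptions):
#   processor = MgpstrProcessor.from_pretrained('alibaba-damo/mgp-str-base')
#   pixel_values = processor(images=image, return_tensors='pt').pixel_values
#   decoded = processor.batch_decode(outputs.logits)  # {'generated_text': ...}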
| 132 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
# TODO Update this
__snake_case = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
    def to_dict(self) -> List[Any]:
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self) -> Any:
        '''simple docstring'''
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
'''simple docstring'''
if self.structure_module is None:
UpperCAmelCase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , snake_case__ ):
UpperCAmelCase : str =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict(self) -> List[Any]:
        '''simple docstring'''
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self) -> Union[str, Any]:
        '''simple docstring'''
        return asdict(self)
def get_default_vocab_list() -> Tuple:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 348 | 0 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)) )
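# Worked example: binary_xor(25, 32) -> '0b111001' (011001 ^ 100000 == 111001, i.e. 57).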
if __name__ == "__main__":
import doctest
doctest.testmod()
| 254 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs) -> str:
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self) -> Tuple:
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self) -> Optional[int]:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self) -> str:
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self) -> Dict:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self) -> str:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self) -> int:
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self) -> str:
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith('cpu'):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 348 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    '''simple docstring'''
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, """total_steps"""):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        """simple docstring"""
        return self.scheduler.get_last_lr()

    def state_dict(self):
        """simple docstring"""
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        """simple docstring"""
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        """simple docstring"""
        return self.scheduler.print_lr(*args, **kwargs)
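# Usage sketch (assumed objects; normally Accelerator.prepare() builds this wrapper):
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizer,
#                                    step_with_optimizer=True, split_batches=False)
#   scheduler.step()  # steps once per real optimizer step, skipping overflow steps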
| 346 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self) -> Union[str, Any]:
        '''simple docstring'''
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 348 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
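# Quick sanity check (illustrative, not in the original file): with the cosine
# schedule each beta stays in (0, max_beta] and there is one beta per timestep.
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,) and betas.max() <= 0.999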
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ) -> Optional[int]:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> Optional[int]:
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None) -> Tuple:
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                """ for the UnCLIPScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    """ for the UnCLIPScheduler.""" )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor:
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 342 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self) -> List[str]:
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self) -> int:
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self) -> Dict:
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench'''
UpperCAmelCase : int =np.random.RandomState(0 )
UpperCAmelCase : str =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , )
UpperCAmelCase : Dict =output.images
UpperCAmelCase : int =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 348 | 0 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Split number_of_bytes into 1-indexed, non-overlapping byte ranges;
    the last partition absorbs any remainder."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
    import doctest
    doctest.testmod()
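    # Example usage (a sketch, not in the original module): splitting 100 bytes across
    # four download workers; the last partition absorbs any remainder.
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']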
| 46 | from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """Build a tiny dataset with two near-duplicate files and one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    return Dataset.from_dict(data_dict)
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self) -> None:
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
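# A minimal reference sketch (not the library's implementation) of the idea the tests
# above exercise: files whose token-set Jaccard similarity exceeds a threshold are
# grouped into one duplicate cluster; MinHash approximates this Jaccard score at scale.
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

def naive_duplicate_clusters(contents: list[str], threshold: float = 0.85) -> list[list[int]]:
    clusters: list[list[int]] = []
    for i, text in enumerate(contents):
        for cluster in clusters:
            # compare against the first member of each existing cluster
            if jaccard(text, contents[cluster[0]]) >= threshold:
                cluster.append(i)
                break
        else:
            clusters.append([i])
    return [c for c in clusters if len(c) > 1]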
| 348 | 0 |
import argparse
import copy
def __lowerCamelCase ( __a :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A__ = {}
with open(__lowerCAmelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
A__ = []
_list.append([line.split()[1], line.split()[2]] )
A__ = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
A__ = []
_list.append([line.split()[0], line.split()[2]] )
A__ = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __lowerCamelCase ( __a :Dict , __a :Any ) -> Union[str, Any]:
"""simple docstring"""
with open(__lowerCAmelCase ) as f:
A__ = f.read(1 )
A__ = start_node
A__ = []
A__ = start_node
A__ = 0
while visiting not in first_solution:
A__ = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__lowerCAmelCase ) and k[0] not in first_solution:
A__ = k[1]
A__ = k[0]
first_solution.append(__lowerCAmelCase )
A__ = distance_of_first_solution + int(__lowerCAmelCase )
A__ = best_node
first_solution.append(__lowerCAmelCase )
A__ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
A__ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def __lowerCamelCase ( __a :Dict , __a :Optional[int] ) -> str:
"""simple docstring"""
A__ = []
for n in solution[1:-1]:
A__ = solution.index(__lowerCAmelCase )
for kn in solution[1:-1]:
A__ = solution.index(__lowerCAmelCase )
if n == kn:
continue
A__ = copy.deepcopy(__lowerCAmelCase )
A__ = kn
A__ = n
A__ = 0
for k in _tmp[:-1]:
A__ = _tmp[_tmp.index(__lowerCAmelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
A__ = distance + int(i[1] )
_tmp.append(__lowerCAmelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
return neighborhood_of_solution
def __lowerCamelCase ( __a :int , __a :List[Any] , __a :Any , __a :Any , __a :Union[str, Any] ) -> int:
"""simple docstring"""
A__ = 1
A__ = first_solution
A__ = []
A__ = distance_of_first_solution
A__ = solution
while count <= iters:
A__ = find_neighborhood(__lowerCAmelCase , __lowerCAmelCase )
A__ = 0
A__ = neighborhood[index_of_best_solution]
A__ = len(__lowerCAmelCase ) - 1
A__ = False
while not found:
A__ = 0
while i < len(__lowerCAmelCase ):
if best_solution[i] != solution[i]:
A__ = best_solution[i]
A__ = solution[i]
break
A__ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
A__ = True
A__ = best_solution[:-1]
A__ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
A__ = cost
A__ = solution
else:
A__ = index_of_best_solution + 1
A__ = neighborhood[index_of_best_solution]
if len(__lowerCAmelCase ) >= size:
tabu_list.pop(0 )
A__ = count + 1
return best_solution_ever, best_cost
def __lowerCamelCase ( __a :List[str]=None ) -> Any:
"""simple docstring"""
A__ = generate_neighbours(args.File )
A__ = generate_first_solution(
args.File , __lowerCAmelCase )
A__ = tabu_search(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.Iterations , args.Size , )
print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
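# Assumed input-file format for the Tabu Search above (an illustration, inferred from
# generate_neighbours): one undirected edge per line as "<node> <node> <distance>", e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# Example invocation (file name is hypothetical):
#   python tabu_search.py -f tabu_test_data.txt -i 4 -s 3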
| 274 | from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Optional[Any] =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ )
else:
UpperCAmelCase : Union[str, Any] =None
UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ )
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1
# get prompt text embeddings
UpperCAmelCase : Optional[int] =self.tokenizer(
snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase : int =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 )
else:
UpperCAmelCase : str =[''''''] * batch_size
UpperCAmelCase : Tuple =text_input_ids.shape[-1]
UpperCAmelCase : Optional[Any] =self.tokenizer(
snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , )
UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1]
UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 )
UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt , num_inference_steps = 100 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents have to be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0, truncation_rate) -> torch.FloatTensor:
        """Zero out (in log space) every token whose more-probable prefix already
        carries at least `truncation_rate` of the probability mass."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
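# A standalone NumPy sketch (illustration only) of the truncation above: a token is kept
# when the total probability mass of strictly more likely tokens is below `truncation_rate`,
# and the most likely token is always kept. Dropped tokens get log-probability -inf.
import numpy as np

def truncate_log_probs(log_p: np.ndarray, truncation_rate: float) -> np.ndarray:
    order = np.argsort(log_p)[::-1]  # indices sorted by descending probability
    cum = np.cumsum(np.exp(log_p[order]))
    keep = np.empty_like(cum, dtype=bool)
    keep[0] = True  # always keep the argmax
    keep[1:] = cum[:-1] < truncation_rate
    out = np.full_like(log_p, -np.inf)
    out[order[keep]] = log_p[order[keep]]
    return out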
| 348 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class A_ ( lowerCamelCase__ ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""input_features""", """is_longer"""]
def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ):
super().__init__(
feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
lowercase = top_db
lowercase = truncation
lowercase = padding
lowercase = fft_window_size
lowercase = (fft_window_size >> 1) + 1
lowercase = hop_length
lowercase = max_length_s
lowercase = max_length_s * sampling_rate
lowercase = sampling_rate
lowercase = frequency_min
lowercase = frequency_max
lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm=snake_case__ , mel_scale='htk' , )
lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm='slaney' , mel_scale='slaney' , )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = spectrogram(
snake_case__ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case__ , log_mel='dB' , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase = [0]
# randomly choose index for each part
lowercase = np.random.choice(ranges[0] )
lowercase = np.random.choice(ranges[1] )
lowercase = np.random.choice(ranges[2] )
lowercase = mel[idx_front : idx_front + chunk_frames, :]
lowercase = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase = mel[idx_back : idx_back + chunk_frames, :]
lowercase = torch.tensor(mel[None, None, :] )
lowercase = torch.nn.functional.interpolate(
snake_case__ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case__ )
lowercase = mel_shrink[0][0].numpy()
lowercase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase = len(snake_case__ ) - max_length
lowercase = np.random.randint(0 , overflow + 1 )
lowercase = waveform[idx : idx + max_length]
lowercase = self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase = self._np_extract_fbank_features(snake_case__ , self.mel_filters )
lowercase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase = np.stack([mel, mel, mel, mel] , axis=0 )
lowercase = False
else:
lowercase = self._random_mel_fusion(snake_case__ , snake_case__ , snake_case__ )
lowercase = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
lowercase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase = int(max_length / len(snake_case__ ) )
lowercase = np.stack(np.tile(snake_case__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase = int(max_length / len(snake_case__ ) )
lowercase = np.stack(np.tile(snake_case__ , snake_case__ ) )
lowercase = np.pad(snake_case__ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
lowercase = self._np_extract_fbank_features(snake_case__ , self.mel_filters )
lowercase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowercase = self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ):
lowercase = truncation if truncation is not None else self.truncation
lowercase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase = isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase = is_batched_numpy or (
isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase = [np.asarray(snake_case__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case__ , np.ndarray ):
lowercase = np.asarray(snake_case__ , dtype=np.floataa )
elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase = [np.asarray(snake_case__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase = [
self._get_input_mel(snake_case__ , max_length if max_length else self.nb_max_samples , snake_case__ , snake_case__ )
for waveform in raw_speech
]
lowercase = []
lowercase = []
for mel, longer in padded_inputs:
input_mel.append(snake_case__ )
is_longer.append(snake_case__ )
if truncation == "fusion" and sum(snake_case__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase = np.random.randint(0 , len(snake_case__ ) )
lowercase = True
if isinstance(input_mel[0] , snake_case__ ):
lowercase = [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase = [[longer] for longer in is_longer]
lowercase = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase = BatchFeature(snake_case__ )
if return_tensors is not None:
lowercase = input_features.convert_to_tensors(snake_case__ )
return input_features
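# A tiny reference sketch (not the extractor itself) of the "repeatpad" padding used above:
# tile the waveform as many whole times as fits, then zero-pad the tail to max_length.
import numpy as np

def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    if waveform.shape[0] < max_length:
        n_repeat = max_length // waveform.shape[0]
        waveform = np.tile(waveform, n_repeat)
        waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant")
    return waveform[:max_length]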
| 195 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple =self.dummy_uncond_unet
UpperCAmelCase : Optional[int] =KarrasVeScheduler()
UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : List[str] =torch.manual_seed(0 )
UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : str =torch.manual_seed(0 )
UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0]
UpperCAmelCase : Any =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256'''
UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ )
UpperCAmelCase : Dict =KarrasVeScheduler()
UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Any =torch.manual_seed(0 )
UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
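# Helper sketch (not part of the original tests): the assertions above compare a 3x3
# corner slice of the generated image to hard-coded reference pixels within a tolerance.
import numpy as np

def slices_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> bool:
    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - expected_slice).max() < atol)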
| 348 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=14 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=0.02 , ):
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Optional[int] = seq_length
UpperCAmelCase__ : Optional[int] = is_training
UpperCAmelCase__ : Tuple = use_input_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = rotary_dim
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[Any] = vocab_size - 1
UpperCAmelCase__ : Optional[Any] = vocab_size - 1
UpperCAmelCase__ : List[Any] = vocab_size - 1
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Dict = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def snake_case__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : Any = model_class_name(snake_case__)
UpperCAmelCase__ : str = model.init_cache(input_ids.shape[0] , snake_case__)
UpperCAmelCase__ : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
UpperCAmelCase__ : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase__ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
UpperCAmelCase__ : Optional[Any] = model(
input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , )
UpperCAmelCase__ : List[Any] = model(snake_case__)
UpperCAmelCase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''')
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = 20
UpperCAmelCase__ : Dict = model_class_name(snake_case__)
UpperCAmelCase__ : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
UpperCAmelCase__ : Dict = model.init_cache(input_ids.shape[0] , snake_case__)
UpperCAmelCase__ : int = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
UpperCAmelCase__ : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase__ : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
UpperCAmelCase__ : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase__ : Any = model(snake_case__ , attention_mask=snake_case__)
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''')
@require_flax
class _snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowerCAmelCase :Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase :Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = FlaxGPTJModelTester(self)
def snake_case__ ( self):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def snake_case__ ( self):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
@tooslow
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
UpperCAmelCase__ : Optional[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=snake_case__ , truncation=snake_case__)
UpperCAmelCase__ : Optional[int] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Union[str, Any] = model.config.eos_token_id
UpperCAmelCase__ : List[Any] = jax.jit(model.generate)
UpperCAmelCase__ : Dict = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
UpperCAmelCase__ : Any = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__)
UpperCAmelCase__ : Tuple = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(snake_case__ , snake_case__)
@is_pt_flax_cross_test
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__)
UpperCAmelCase__ : List[str] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase__ : Any = getattr(snake_case__ , snake_case__)
                batch_size, seq_length = pt_inputs["input_ids"].shape
UpperCAmelCase__ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(snake_case__):
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : List[str] = pt_model_class(snake_case__).eval()
UpperCAmelCase__ : Optional[int] = model_class(snake_case__ , dtype=jnp.floataa)
UpperCAmelCase__ : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__)
UpperCAmelCase__ : Union[str, Any] = fx_state
with torch.no_grad():
UpperCAmelCase__ : Any = pt_model(**snake_case__).to_tuple()
UpperCAmelCase__ : Dict = fx_model(**snake_case__).to_tuple()
self.assertEqual(len(snake_case__) , len(snake_case__) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(snake_case__ , snake_case__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__)
UpperCAmelCase__ : str = model_class.from_pretrained(snake_case__ , from_pt=snake_case__)
UpperCAmelCase__ : int = fx_model_loaded(**snake_case__).to_tuple()
self.assertEqual(
len(snake_case__) , len(snake_case__) , """Output lengths differ between Flax and PyTorch""")
for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@is_pt_flax_cross_test
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__)
UpperCAmelCase__ : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase__ : int = getattr(snake_case__ , snake_case__)
UpperCAmelCase__ : Dict = pt_model_class(snake_case__).eval()
UpperCAmelCase__ : str = model_class(snake_case__ , dtype=jnp.floataa)
UpperCAmelCase__ : Optional[Any] = load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
UpperCAmelCase__ : Optional[int] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(snake_case__):
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = pt_model(**snake_case__).to_tuple()
UpperCAmelCase__ : List[Any] = fx_model(**snake_case__).to_tuple()
self.assertEqual(len(snake_case__) , len(snake_case__) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(snake_case__ , snake_case__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__)
UpperCAmelCase__ : Tuple = pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__)
with torch.no_grad():
UpperCAmelCase__ : Any = pt_model_loaded(**snake_case__).to_tuple()
self.assertEqual(
len(snake_case__) , len(snake_case__) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(snake_case__ , snake_case__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@tooslow
def snake_case__ ( self):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : str = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
UpperCAmelCase__ : Tuple = model(np.ones((1, 1)))
            self.assertIsNotNone(snake_case__)
| 163 | import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and run a 4-qubit half-adder circuit; classical bit 0 holds the
    sum (XOR) and classical bit 1 holds the carry (AND)."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
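# Classical reference for the circuit above (a sketch, not in the original module):
# qubit 2 ends up holding XOR(bit0, bit1) (the sum) and qubit 3 holds AND(bit0, bit1)
# (the carry). The circuit is deterministic, so half_adder(1, 1) should yield
# counts of {"10": 1000} (carry=1, sum=0).
def classical_half_adder(bit0: int, bit1: int) -> tuple[int, int]:
    return bit0 ^ bit1, bit0 & bit1  # (sum, carry)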
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
| 348 | 0 |
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of whitespace-separated words in a string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =parent
UpperCAmelCase : Optional[int] =batch_size
UpperCAmelCase : Dict =seq_length
UpperCAmelCase : Optional[Any] =is_training
UpperCAmelCase : List[str] =use_labels
UpperCAmelCase : List[Any] =vocab_size
UpperCAmelCase : Optional[int] =hidden_size
UpperCAmelCase : Tuple =num_hidden_layers
UpperCAmelCase : Any =num_attention_heads
UpperCAmelCase : Optional[int] =intermediate_size
UpperCAmelCase : str =hidden_dropout_prob
UpperCAmelCase : Optional[int] =attention_probs_dropout_prob
UpperCAmelCase : str =max_position_embeddings
UpperCAmelCase : List[Any] =eos_token_id
UpperCAmelCase : Optional[int] =pad_token_id
UpperCAmelCase : Tuple =bos_token_id
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[str] =prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =TFBlenderbotModel(config=snake_case__ ).get_decoder()
UpperCAmelCase : Any =inputs_dict['''input_ids''']
UpperCAmelCase : str =input_ids[:1, :]
UpperCAmelCase : Tuple =inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase : Tuple =inputs_dict['''head_mask''']
UpperCAmelCase : List[Any] =1
# first forward pass
UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
UpperCAmelCase , UpperCAmelCase : str =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : int =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : List[Any] =output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : Dict =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , )-> str:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase : int =tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : Tuple =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : str =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Dict = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] =TFBlenderbotModelTester(self )
UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""]
__lowerCamelCase : Dict = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase : Optional[int] =self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 348 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowercase ( lowerCamelCase__, unittest.TestCase ):
"""simple docstring"""
__A = RoFormerTokenizer
__A = RoFormerTokenizerFast
__A = True
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **snake_case__ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **snake_case__ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = '''永和服装饰品有限公司,今天天气非常好'''
a = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_rust_tokenizer()
a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
| 227 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=2 , snake_case__=512 , snake_case__=256 , snake_case__=True , snake_case__=True , snake_case__=("p2c", "c2p") , snake_case__="layer_norm" , snake_case__="gelu_python" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-7 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=0 , snake_case__=1 , snake_case__=2 , **snake_case__ , ) -> int:
'''simple docstring'''
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
UpperCAmelCase : Union[str, Any] =hidden_size
UpperCAmelCase : Union[str, Any] =feat_extract_norm
UpperCAmelCase : Optional[Any] =feat_extract_activation
UpperCAmelCase : List[str] =list(snake_case__ )
UpperCAmelCase : int =list(snake_case__ )
UpperCAmelCase : List[str] =list(snake_case__ )
UpperCAmelCase : str =conv_bias
UpperCAmelCase : Tuple =num_conv_pos_embeddings
UpperCAmelCase : Dict =num_conv_pos_embedding_groups
UpperCAmelCase : str =len(self.conv_dim )
UpperCAmelCase : Dict =num_hidden_layers
UpperCAmelCase : Optional[int] =intermediate_size
UpperCAmelCase : List[Any] =squeeze_factor
UpperCAmelCase : str =max_position_embeddings
UpperCAmelCase : int =position_buckets
UpperCAmelCase : Optional[int] =share_att_key
UpperCAmelCase : Optional[int] =relative_attention
UpperCAmelCase : Tuple =norm_rel_ebd
UpperCAmelCase : List[Any] =list(snake_case__ )
UpperCAmelCase : Dict =hidden_act
UpperCAmelCase : Optional[int] =num_attention_heads
UpperCAmelCase : Any =hidden_dropout
UpperCAmelCase : str =attention_dropout
UpperCAmelCase : Union[str, Any] =activation_dropout
UpperCAmelCase : str =feat_proj_dropout
UpperCAmelCase : Union[str, Any] =final_dropout
UpperCAmelCase : Optional[int] =layer_norm_eps
UpperCAmelCase : str =feature_layer_norm_eps
UpperCAmelCase : str =initializer_range
UpperCAmelCase : Any =vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Union[str, Any] =apply_spec_augment
UpperCAmelCase : Optional[Any] =mask_time_prob
UpperCAmelCase : Tuple =mask_time_length
UpperCAmelCase : str =mask_time_min_masks
UpperCAmelCase : Optional[int] =mask_feature_prob
UpperCAmelCase : Optional[Any] =mask_feature_length
UpperCAmelCase : List[Any] =mask_feature_min_masks
# ctc loss
UpperCAmelCase : str =ctc_loss_reduction
UpperCAmelCase : Optional[int] =ctc_zero_infinity
# sequence classification
UpperCAmelCase : Union[str, Any] =use_weighted_layer_sum
UpperCAmelCase : int =classifier_proj_size
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
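# Sanity-check sketch (not in the original config file): with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property above multiplies out to
# 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples.
# >>> functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)
# 320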
| 348 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__( self, vocab_size=250_880, hidden_size=2_560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10_240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
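# Illustrative aside: the `inputs` property above is the per-tensor dynamic-axes
# spec an ONNX exporter consumes. A sketch of the resulting structure (the
# exporter call in the comment is an assumption, not code from this file).
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
# e.g. torch.onnx.export(..., input_names=list(onnx_inputs), dynamic_axes=dict(onnx_inputs))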
| 205 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ['''RANK'''])
    world_size = int(os.environ['''WORLD_SIZE'''])

    parser = ArgumentParser()
    parser.add_argument('''--streaming''', type=bool)
    parser.add_argument('''--local_rank''', type=int)
    parser.add_argument('''--num_workers''', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
| 348 | 0 |
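# Illustrative aside: the size bookkeeping the distributed test above relies on.
# When full_size examples are split across world_size nodes, the first
# full_size % world_size ranks receive one extra example.
NUM_SHARDS, NUM_ITEMS_PER_SHARD = 4, 3
world_size = 3
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD  # 12
for rank in range(world_size):
    expected_local_size = full_size // world_size + int(rank < full_size % world_size)
    print(rank, expected_local_size)  # every rank gets 4 here, since 12 % 3 == 0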
"""simple docstring"""
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
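# Illustrative cross-check for the closed-form search above: a brute-force
# scan over (a, b) pairs. Much slower, but confirms the algebraic shortcut.
def solution_bruteforce(n: int = 1000) -> int:
    best = -1
    for a in range(1, n // 3):
        for b in range(a, (n - a) // 2 + 1):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best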
| 132 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 | 0 |
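# Illustrative aside: what the `_import_structure` + `_LazyModule` pattern above
# buys — submodules are only imported on first attribute access, so importing
# the package stays fast even when optional backends are installed. The toy
# class below is an illustration, not the transformers implementation.
import importlib

class LazyAttr:
    def __init__(self, module_name, attr):
        self._module_name, self._attr = module_name, attr

    def load(self):
        # the submodule is only imported here, on first access
        return getattr(importlib.import_module(self._module_name), self._attr)

assert LazyAttr("collections", "OrderedDict").load() is __import__("collections").OrderedDict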
'''Gaussian (normal) distribution probability density function.'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Density of the normal distribution N(mu, sigma**2) evaluated at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
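# Quick numerical sanity check (illustrative, assumes `gaussian` above is in
# scope): the density should integrate to ~1 over a wide interval.
import numpy as np

xs = np.linspace(-8.0, 8.0, 100_001)
assert abs(np.trapz(gaussian(xs), xs) - 1.0) < 1e-6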
| 254 | import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, rotary_dim=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='''i4''')

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask )
@tooslow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
UpperCAmelCase : Optional[Any] =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=snake_case__ , truncation=snake_case__ )
UpperCAmelCase : Optional[int] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : str =False
UpperCAmelCase : Union[str, Any] =model.config.eos_token_id
UpperCAmelCase : List[Any] =jax.jit(model.generate )
UpperCAmelCase : Dict =jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
UpperCAmelCase : Any =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
UpperCAmelCase : Tuple =[
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(snake_case__ , snake_case__ )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : int =0
UpperCAmelCase : Optional[int] =1
UpperCAmelCase : Optional[int] =0
UpperCAmelCase : Union[str, Any] =1
UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval()
UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa )
UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
UpperCAmelCase : Union[str, Any] =fx_state
with torch.no_grad():
UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ )
UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : int =getattr(snake_case__ , snake_case__ )
UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval()
UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa )
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : str =0
UpperCAmelCase : Any =1
UpperCAmelCase : List[Any] =0
UpperCAmelCase : Tuple =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ )
with torch.no_grad():
UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : Tuple =model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 348 | 0 |
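# Illustrative aside (numpy stands in for jax.numpy): the position-id shapes
# the cache tests above construct — rows 0..L-2 for the prefill pass, then a
# single column holding L-1 for the final token of incremental decoding.
import numpy as np

batch, seq_len = 2, 5
prefill_positions = np.broadcast_to(np.arange(seq_len - 1)[None, :], (batch, seq_len - 1))
last_position = np.array(batch * [[seq_len - 1]])
assert prefill_positions.shape == (2, 4) and last_position.shape == (2, 1)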
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    '''Wraps a BLIP image processor and a BERT tokenizer into a single processor.'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
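# Pure-Python sketch of the merge behaviour above: when both modalities are
# given, tokenizer outputs are folded into the image-processor outputs.
# Values below are made up.
image_features = {"pixel_values": [[0.1, 0.2]]}
text_features = {"input_ids": [[101, 2003, 102]], "attention_mask": [[1, 1, 1]]}
image_features.update(text_features)
assert set(image_features) == {"pixel_values", "input_ids", "attention_mask"}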
| 346 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
        '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BloomForCausalLM''',
        '''BloomModel''',
        '''BloomPreTrainedModel''',
        '''BloomForSequenceClassification''',
        '''BloomForTokenClassification''',
        '''BloomForQuestionAnswering''',
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can\'t be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(default=None, metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
    mlm_probability: float = field(
        default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(""".""")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference indices (one JSON list per line) as a `chinese_ref` column."""
    with open(ref_file, """r""", encoding="""utf-8""") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(""".""")[-1]
        if extension == "txt":
            extension = '''text'''
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""")

    if model_args.config_overrides is not None:
        logger.info(f'Overriding config: {model_args.config_overrides}')
        config.update_from_string(model_args.config_overrides)
        logger.info(f'New config: {config}')

    tokenizer_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''use_fast''': model_args.use_fast_tokenizer,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script. """
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("""Training new model from scratch""")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['''train'''].column_names
    else:
        column_names = datasets['''validation'''].column_names
    text_column_name = '''text''' if '''text''' in column_names else column_names[0]

    padding = '''max_length''' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples['''text'''] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["""train"""], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["""validation"""], data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None, eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, """train_results.txt""")
        if trainer.is_world_process_zero():
            with open(output_train_file, """w""") as writer:
                logger.info("""***** Train results *****""")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, """trainer_state.json"""))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["""eval_loss"""])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, """eval_results_mlm_wwm.txt""")
        if trainer.is_world_process_zero():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in sorted(results.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
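# Illustrative sketch of the whole-word-masking "ref" file consumed above: one
# JSON list per line, giving indices of sub-tokens that continue a word. The
# toy data and exact index semantics here are assumptions following the
# chinese_ref convention, not a spec.
import json

lines = ["[2, 3]", "[1]"]  # e.g. tokens 2 and 3 extend the word started earlier
refs = [json.loads(line) for line in lines]
dataset_dict = {"input_ids": [[101, 9, 10, 11, 102], [101, 7, 8, 102]]}
dataset_dict["chinese_ref"] = refs  # the column DataCollatorForWholeWordMask reads
assert len(dataset_dict["input_ids"]) == len(refs)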
| 342 | import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('''T''')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
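# Illustrative use of the aliases above: one annotation covers a bare value,
# a list/tuple, or a dict of values.
def as_list(value: NestedDataStructureLike[int]) -> ListLike[int]:
    if isinstance(value, dict):
        return list(value.values())
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]

assert as_list(3) == [3] and as_list({"a": 1, "b": 2}) == [1, 2]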
| 348 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
SPIECE_UNDERLINE = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__( self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
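# Plain-list sketch of the layouts the methods above produce (ids are made up):
# single: [CLS] A [SEP]    pair: [CLS] A [SEP] B [SEP]
cls_id, sep_id = 65, 66
seq_a, seq_b = [10, 11], [20]
pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert len(pair) == len(token_type_ids) == 6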
| 348 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 2_55, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 3_84}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_24 / 2_56
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}')
        shortest_edge = size['''shortest_edge''']

        if shortest_edge < 3_84:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")

        if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
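# Numeric sketch of the crop_pct rule in `resize` above: below 384 the
# shortest edge is first inflated by 1/crop_pct, then center-cropped back.
requested, crop_pct = 224, 224 / 256
resize_shortest_edge = int(requested / crop_pct)
assert resize_shortest_edge == 256  # resize shortest edge to 256, crop to 224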
| 274 | from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling points uniformly in the square [-1, 1] x [-1, 1]."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations) ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('''******************''')
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print('''******************''')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0 )

    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
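# Illustrative convergence check: a Monte Carlo estimate's error shrinks
# roughly like 1/sqrt(N). Stdlib only, seeded for repeatability.
import random
from math import pi, sqrt

random.seed(0)
for n in (1_000, 100_000):
    inside = sum(random.random() ** 2 + random.random() ** 2 <= 1 for _ in range(n))
    print(n, abs(4 * inside / n - pi), "~ expected scale:", 1 / sqrt(n))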
| 348 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _snake_case ( lowerCamelCase__ ):
lowerCAmelCase :Optional[Any] = """sew-d"""
def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase=2 , _lowerCamelCase=512 , _lowerCamelCase=256 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=("p2c", "c2p") , _lowerCamelCase="layer_norm" , _lowerCamelCase="gelu_python" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-7 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=0 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , **_lowerCamelCase , ):
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__)
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Union[str, Any] = feat_extract_norm
UpperCAmelCase__ : Optional[Any] = feat_extract_activation
UpperCAmelCase__ : List[str] = list(snake_case__)
UpperCAmelCase__ : int = list(snake_case__)
UpperCAmelCase__ : List[str] = list(snake_case__)
UpperCAmelCase__ : str = conv_bias
UpperCAmelCase__ : Tuple = num_conv_pos_embeddings
UpperCAmelCase__ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase__ : str = len(self.conv_dim)
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : List[Any] = squeeze_factor
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : int = position_buckets
UpperCAmelCase__ : Optional[int] = share_att_key
UpperCAmelCase__ : Optional[int] = relative_attention
UpperCAmelCase__ : Tuple = norm_rel_ebd
UpperCAmelCase__ : List[Any] = list(snake_case__)
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Any = hidden_dropout
UpperCAmelCase__ : str = attention_dropout
UpperCAmelCase__ : Union[str, Any] = activation_dropout
UpperCAmelCase__ : str = feat_proj_dropout
UpperCAmelCase__ : Union[str, Any] = final_dropout
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : str = feature_layer_norm_eps
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Any = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : Union[str, Any] = apply_spec_augment
UpperCAmelCase__ : Optional[Any] = mask_time_prob
UpperCAmelCase__ : Tuple = mask_time_length
UpperCAmelCase__ : str = mask_time_min_masks
UpperCAmelCase__ : Optional[int] = mask_feature_prob
UpperCAmelCase__ : Optional[Any] = mask_feature_length
UpperCAmelCase__ : List[Any] = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ : str = ctc_loss_reduction
UpperCAmelCase__ : Optional[int] = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ : Union[str, Any] = use_weighted_layer_sum
UpperCAmelCase__ : int = classifier_proj_size
@property
def snake_case__ ( self):
        return functools.reduce(operator.mul, self.conv_stride, 1)


import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
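# Note on `cosine_distance` (illustrative; the shapes are assumed rather than documented
# in this file): for `image_embeds` of shape (batch, dim) and `text_embeds` of shape
# (concepts, dim), normalizing both and taking `torch.mm` yields a (batch, concepts)
# matrix of cosine similarities, one score per image/concept pair.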
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
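# Hedged usage sketch (the pipeline wiring below is assumed, not shown in this file):
#   checker = StableDiffusionSafetyChecker(CLIPConfig())
#   images, has_nsfw = checker(clip_input=clip_pixel_batch, images=decoded_images)
#   # `has_nsfw` is a list of booleans, one per image, derived from `bad_concepts`.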
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
snake_case_ = """pixel_values"""
snake_case_ = False
snake_case_ = TimmBackboneConfig
def __init__( self : List[Any] ,A : Optional[int] ,**A : Union[str, Any] ):
requires_backends(self ,"timm" )
super().__init__(snake_case__ )
__A = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(snake_case__ ,"out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__A = getattr(snake_case__ ,"use_pretrained_backbone" ,snake_case__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__A = config.out_indices if getattr(snake_case__ ,"out_indices" ,snake_case__ ) is not None else (-1,)
__A = timm.create_model(
config.backbone ,pretrained=snake_case__ ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=snake_case__ ,**snake_case__ ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(snake_case__ )
@classmethod
def UpperCamelCase_ ( cls : Dict ,A : Tuple ,*A : List[str] ,**A : Optional[int] ):
requires_backends(cls ,["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__A = kwargs.pop("config" ,TimmBackboneConfig() )
__A = kwargs.pop("use_timm_backbone" ,snake_case__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__A = kwargs.pop("num_channels" ,config.num_channels )
__A = kwargs.pop("features_only" ,config.features_only )
__A = kwargs.pop("use_pretrained_backbone" ,config.use_pretrained_backbone )
__A = kwargs.pop("out_indices" ,config.out_indices )
__A = TimmBackboneConfig(
backbone=snake_case__ ,num_channels=snake_case__ ,features_only=snake_case__ ,use_pretrained_backbone=snake_case__ ,out_indices=snake_case__ ,)
return super()._from_config(snake_case__ ,**snake_case__ )
def UpperCamelCase_ ( self : Tuple ,A : List[Any] ):
pass
def UpperCamelCase_ ( self : Any ,A : int ,A : int=None ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,**A : Dict ):
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__A = self._all_layers
__A = self._backbone(snake_case__ ,**snake_case__ )
__A = self._return_layers
__A = tuple(hidden_states[i] for i in self.out_indices )
else:
__A = self._backbone(snake_case__ ,**snake_case__ )
__A = None
__A = tuple(snake_case__ )
__A = tuple(snake_case__ ) if hidden_states is not None else None
if not return_dict:
__A = (feature_maps,)
if output_hidden_states:
__A = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=snake_case__ ,hidden_states=snake_case__ ,attentions=snake_case__ )
import argparse
import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
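# Example invocation (illustrative; the flags are the ones defined by the parser above,
# and the script filename is assumed):
#   python stable_diffusion_ipex.py --dpm --steps 20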
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the paths from (row, col) to the bottom-right cell of the grid."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
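# Illustrative check (not part of the original module): count the paths through a small
# grid where 1 marks a blocked cell; `depth_first_search` mutates `visit`, so pass a
# fresh set per query.
#   grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   depth_first_search(grid, 0, 0, set())  # -> 2 (around the blocked centre, both ways)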
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
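# Worked example (standard test value, not from this file): 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the canonical sample ID validates:
#   is_spain_national_id("12345678Z")  # -> True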
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
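# For context, a minimal MinHash sketch of the idea being tested (illustrative only; the
# real implementation lives in minhash_deduplication.py, which is not shown here):
#   from datasketch import MinHash
#   m = MinHash(num_perm=128)
#   for token in ("a " * 20).split():
#       m.update(token.encode("utf8"))
#   # near-duplicate documents produce MinHashes with high estimated Jaccard similarity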
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
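# Quick sanity checks (illustrative, mirroring the doctest style used above):
#   logical_left_shift(1, 3)        # -> "0b1000"
#   logical_right_shift(8, 3)       # -> "0b1"
#   arithmetic_right_shift(-8, 2)   # -> "0b11110"  (sign-extended two's complement, -2)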
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
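# Hedged usage sketch (assumes the public `datasets` API built on the functions above):
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2])    # alternates rows: 0, 10, 1, 11, ...
#   joined = concatenate_datasets([d1, d2])  # 6 rows, d1 followed by d2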
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
'''simple docstring'''
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : str =self.dummy_model()
UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Any =model(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : int =output.prev_sample
UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : Any =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config()
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Optional[int] =self.dummy_model()
UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : str =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : Optional[int] =output.prev_sample
UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : List[Any] =self.scheduler_classes[0]
UpperCAmelCase : Dict =self.get_scheduler_config()
UpperCAmelCase : List[str] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
UpperCAmelCase : int =self.dummy_model()
UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : int =model(snake_case__ , snake_case__ )
UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =output.prev_sample
UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) )
if str(snake_case__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
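# Worked check (known values): 2**15 == 32768, whose digits sum to 26, and the published
# answer for power=1000 is 1366.
#   solution(15)    # -> 26
#   solution(1000)  # -> 1366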
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
UpperCAmelCase : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Dict ='''A red cat sitting on a park bench'''
UpperCAmelCase : int =np.random.RandomState(0 )
UpperCAmelCase : Any =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , )
UpperCAmelCase : Dict =output.images
UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench'''
UpperCAmelCase : int =np.random.RandomState(0 )
UpperCAmelCase : str =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , )
UpperCAmelCase : Dict =output.images
UpperCAmelCase : int =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def _snake_case ( self ) -> int:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def _snake_case ( self ) -> Any:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def _snake_case ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def _snake_case ( self ) -> int:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case__ )
def _snake_case ( self ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def _snake_case ( self ) -> List[Any]:
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def _snake_case ( self ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def _snake_case ( self ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case__ )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = len(snake_case__ )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = self.dummy_sample_deter + 0.1
lowerCAmelCase = self.dummy_sample_deter - 0.1
lowerCAmelCase = samplea.shape[0]
lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = len(snake_case__ )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
lowerCAmelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = len(snake_case__ )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
lowerCAmelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(snake_case__ ):
if i == len(snake_case__ ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(snake_case__ )
lowerCAmelCase = prev_t.item()
self.assertEqual(snake_case__ , snake_case__ )
def _snake_case ( self ) -> int:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case__ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=snake_case__ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(snake_case__ )
with self.assertRaises(snake_case__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**snake_case__ )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=snake_case__ )
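
# A minimal usage sketch (an illustration, not part of the test suite; it assumes the
# same public `diffusers` API that the tests above exercise):
if __name__ == "__main__":
    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for a real denoising model's output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)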
from unittest import TestCase
from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    # two near-duplicate files ("a " repeated) plus one distinct file ("b " repeated)
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
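
# Quick manual run of the API under test (a sketch; assumes `minhash_deduplication`
# is importable exactly as in the imports above):
if __name__ == "__main__":
    ds = get_dataset()
    clusters = make_duplicate_clusters(ds, 0.85)
    print(len(clusters), clusters[0])  # one cluster containing the two near-duplicate files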
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
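
# For reference, a self-contained sketch of the Karras et al. (2022) sigma schedule that
# KarrasVeScheduler discretizes. The sigma_min/sigma_max/rho defaults here are
# illustrative assumptions, not the scheduler's exact configuration:
def karras_sigmas(n, sigma_min=0.02, sigma_max=100.0, rho=7.0):
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate in sigma^(1/rho) space, then map back
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho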
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) all classes outside the smallest set whose cumulative probability exceeds `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
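
# Standalone sketch of the truncation trick used in `truncate` above, written as a
# free function so it can be experimented with outside the pipeline (pure torch):
def truncate_log_probs(log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep at least the single most likely class per pixel
    keep_mask = torch.cat((torch.ones_like(keep_mask[:, 0:1, :]), keep_mask[:, :-1, :]), dim=1)
    # undo the sort so the mask lines up with the original class order
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = -torch.inf  # log(0)
    return out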
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
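
# End-to-end inference sketch (an illustration, not part of the test suite; it assumes
# network access to the checkpoint and the `post_process_instance_segmentation` helper
# that `transformers` provides for this model family):
if __name__ == "__main__":
    processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    image = prepare_img()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes expects (height, width); PIL's image.size is (width, height)
    result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(result["segmentation"].shape)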
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image (in [-1, 1]) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images


import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
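
# Classical cross-check of the circuit above: a half adder computes sum = a XOR b and
# carry = a AND b, so the dominant measured bitstring should read f"{carry}{sum}":
def classical_half_adder(bit0, bit1):
    return bit0 ^ bit1, bit0 & bit1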
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
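
# Small sanity check of the derived attributes (pure config code, no model weights needed):
if __name__ == "__main__":
    config = SwinConfig()
    # hidden_size is the channel dim after the last stage: 96 * 2 ** 3 == 768
    print(config.hidden_size, config.stage_names)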
from __future__ import annotations
import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
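
# Quick standalone check of `prepare_blenderbot_inputs_dict` (a sketch; assumes TF is
# installed, and the small config values here are illustrative choices):
if __name__ == "__main__":
    cfg = BlenderbotConfig(
        vocab_size=99,
        d_model=32,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        pad_token_id=1,
    )
    ids = tf.constant([[5, 6, 2], [7, 1, 1]])
    features = prepare_blenderbot_inputs_dict(cfg, ids, ids)
    print({name: tensor.shape for name, tensor in features.items()})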
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]

    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
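
# Usage sketch for the API under test (assumes the same `Extractor` interface the
# tests above exercise; writes temporary files into the current directory):
if __name__ == "__main__":
    import gzip
    from pathlib import Path

    src = Path("example.txt.gz")
    with gzip.open(src, "wb") as f:
        f.write(b"hello")
    fmt = Extractor.infer_extractor_format(src)  # expected: "gzip"
    Extractor.extract(src, Path("example.txt"), fmt)
    print(fmt, Path("example.txt").read_text())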
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
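
# The property above maps raw audio lengths to encoder frame counts; with the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is 5 * 2**6 == 320
# input samples per output frame. Quick check:
if __name__ == "__main__":
    print(SEWDConfig().inputs_to_logits_ratio)  # 320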
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when `num_picked` balls are drawn without replacement."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
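
# A quick Monte-Carlo cross-check of the closed-form expectation above (a sketch;
# the trial count is an arbitrary choice, so agreement is only approximate):
def monte_carlo_estimate(num_picked: int = 20, trials: int = 10_000) -> float:
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total_distinct = 0
    for _ in range(trials):
        total_distinct += len(set(random.sample(balls, num_picked)))
    return total_distinct / trials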
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__snake_case = 4
__snake_case = 3
class __snake_case ( lowerCamelCase__ ):
pass
def lowerCAmelCase_ ( __lowerCAmelCase )-> List[str]:
'''simple docstring'''
for shard in shards:
for i in range(__lowerCAmelCase ):
yield {"i": i, "shard": shard}
def main ( )-> None:
    '''simple docstring'''
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
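# Worked example of the size bookkeeping above (a sketch; the numbers follow the
# module constants NUM_SHARDS = 4 and NUM_ITEMS_PER_SHARD = 3, so full_size = 12):
# with world_size = 5, every rank gets 12 // 5 = 2 items, and ranks 0 and 1 get
# one extra item because 12 % 5 == 2 -- local sizes (3, 3, 2, 2, 2), summing to 12.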
| 348 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Tuple = (32, 32)
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def _a ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _a ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _a ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(snake_case__ )
@property
def _a ( self ) -> Dict:
"""simple docstring"""
def extract(*_a , **_a ):
class __a :
'''simple docstring'''
def __init__( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = torch.ones([0] )
def _a ( self , _a ) -> Any:
"""simple docstring"""
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Dict = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=snake_case__ , )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Any = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=snake_case__ , )[0]
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
assert isinstance(pipe.scheduler , snake_case__ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionPipeline.from_pretrained(snake_case__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : str = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unet.half()
SCREAMING_SNAKE_CASE__ : Any = vae.half()
SCREAMING_SNAKE_CASE__ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Tuple = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=snake_case__ )
SCREAMING_SNAKE_CASE__ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Dict = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
SCREAMING_SNAKE_CASE__ : str = 4_003_660_346
SCREAMING_SNAKE_CASE__ : Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : str = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Any = output.images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[Any] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=snake_case__ )
SCREAMING_SNAKE_CASE__ : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Dict = '''padme amidala taking a bath artwork, safe for work, no nudity'''
SCREAMING_SNAKE_CASE__ : Any = 2_734_971_755
SCREAMING_SNAKE_CASE__ : List[str] = 7
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : str = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[Any] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
SCREAMING_SNAKE_CASE__ : str = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
SCREAMING_SNAKE_CASE__ : Any = 1_044_355_234
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
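# The tests above repeat one comparison idiom: take the 3x3 corner of the last
# channel and compare it against a reference slice within a tolerance. A compact
# sketch of that pattern (hypothetical helper, not part of the original suite):
def _assert_corner_close(images, expected_slice, atol=1e-2):
    corner = images[0, -3:, -3:, -1]  # 3x3 patch from the last channel
    assert np.abs(corner.flatten() - np.asarray(expected_slice)).max() < atol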
| 132 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
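# With the lazy module above in place, `from transformers.models.opt import
# OPTConfig` resolves the class on first attribute access rather than eagerly.
# A rough comment-only sketch of what _LazyModule defers (illustrative, not the
# actual implementation):
#     import importlib
#     mod = importlib.import_module(".configuration_opt", package="transformers.models.opt")
#     OPTConfig = getattr(mod, "OPTConfig")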
| 348 | 0 |
'''simple docstring'''
import qiskit
def half_adder ( bita : int , bitb : int ):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'Half Adder Output Qubit Counts: {counts}')
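    # A small sketch exercising all four input pairs (illustrative only): each
    # counts key is a two-bit string "<AND bit><XOR bit>", so half_adder(1, 1)
    # is dominated by '10' -- carry 1, sum 0.
    for bit_a in (0, 1):
        for bit_b in (0, 1):
            print(bit_a, bit_b, half_adder(bit_a, bit_b))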
| 254 | import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __snake_case :
def __init__( self , snake_case__ , snake_case__=14 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , ) -> str:
'''simple docstring'''
UpperCAmelCase : str =parent
UpperCAmelCase : Tuple =batch_size
UpperCAmelCase : Optional[int] =seq_length
UpperCAmelCase : Optional[int] =is_training
UpperCAmelCase : Tuple =use_input_mask
UpperCAmelCase : List[Any] =use_token_type_ids
UpperCAmelCase : Optional[Any] =use_labels
UpperCAmelCase : Union[str, Any] =vocab_size
UpperCAmelCase : List[Any] =hidden_size
UpperCAmelCase : Optional[int] =rotary_dim
UpperCAmelCase : Union[str, Any] =num_hidden_layers
UpperCAmelCase : List[Any] =num_attention_heads
UpperCAmelCase : Dict =intermediate_size
UpperCAmelCase : Union[str, Any] =hidden_act
UpperCAmelCase : Any =hidden_dropout_prob
UpperCAmelCase : Dict =attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] =max_position_embeddings
UpperCAmelCase : str =initializer_range
UpperCAmelCase : Optional[int] =None
UpperCAmelCase : List[Any] =vocab_size - 1
UpperCAmelCase : Optional[Any] =vocab_size - 1
UpperCAmelCase : List[Any] =vocab_size - 1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[Any] =None
if self.use_input_mask:
UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Dict =GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple =self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] =config_and_inputs
UpperCAmelCase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =20
UpperCAmelCase : Any =model_class_name(snake_case__ )
UpperCAmelCase : str =model.init_cache(input_ids.shape[0] , snake_case__ )
UpperCAmelCase : Any =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase : Optional[Any] =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase : Optional[Any] =model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase : List[str] =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase : Optional[Any] =model(
input_ids[:, -1:] , attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case__ , )
UpperCAmelCase : List[Any] =model(snake_case__ )
UpperCAmelCase : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Dict =20
UpperCAmelCase : Dict =model_class_name(snake_case__ )
UpperCAmelCase : Tuple =jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
UpperCAmelCase : Dict =model.init_cache(input_ids.shape[0] , snake_case__ )
UpperCAmelCase : int =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase : Optional[Any] =model(
input_ids[:, :-1] , attention_mask=snake_case__ , past_key_values=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase : str =model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case__ , position_ids=snake_case__ , )
UpperCAmelCase : Any =model(snake_case__ , attention_mask=snake_case__ )
UpperCAmelCase : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCamelCase : Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =FlaxGPTJModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
@tooslow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
UpperCAmelCase : Optional[Any] =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=snake_case__ , truncation=snake_case__ )
UpperCAmelCase : Optional[int] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : str =False
UpperCAmelCase : Union[str, Any] =model.config.eos_token_id
UpperCAmelCase : List[Any] =jax.jit(model.generate )
UpperCAmelCase : Dict =jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
UpperCAmelCase : Any =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
UpperCAmelCase : Tuple =[
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(snake_case__ , snake_case__ )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : int =0
UpperCAmelCase : Optional[int] =1
UpperCAmelCase : Optional[int] =0
UpperCAmelCase : Union[str, Any] =1
UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval()
UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.floataa )
UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
UpperCAmelCase : Union[str, Any] =fx_state
with torch.no_grad():
UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ )
UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : int =getattr(snake_case__ , snake_case__ )
UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval()
UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa )
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : str =0
UpperCAmelCase : Any =1
UpperCAmelCase : List[Any] =0
UpperCAmelCase : Tuple =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ )
with torch.no_grad():
UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : Tuple =model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 348 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ = {
'facebook/nllb-large-en-ro': 1_0_2_4,
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
UpperCAmelCase_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
lowerCAmelCase_ : Any = NllbTokenizer
lowerCAmelCase_ : List[int] = []
lowerCAmelCase_ : List[int] = []
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : List[str]="<unk>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Any=False , **_UpperCAmelCase : int , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
UpperCAmelCase__ = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = False if not self.vocab_file else True
UpperCAmelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCAmelCase__ = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase__ = src_lang if src_lang is not None else '''eng_Latn'''
UpperCAmelCase__ = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Any = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase__ = src_lang
UpperCAmelCase__ = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
UpperCAmelCase__ = self.convert_tokens_to_ids(snake_case__ )
UpperCAmelCase__ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str = "eng_Latn" , _UpperCAmelCase : List[str] = None , _UpperCAmelCase : List[str] = "fra_Latn" , **_UpperCAmelCase : Any , ):
"""simple docstring"""
UpperCAmelCase__ = src_lang
UpperCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
UpperCAmelCase__ = []
UpperCAmelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase__ = [self.cur_lang_code]
UpperCAmelCase__ = [self.eos_token_id]
UpperCAmelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
UpperCAmelCase__ = []
UpperCAmelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase__ = [self.cur_lang_code]
UpperCAmelCase__ = [self.eos_token_id]
UpperCAmelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
UpperCAmelCase__ = os.path.join(
snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
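# A hedged usage sketch of the API this class mirrors (NllbTokenizerFast in
# transformers; downloading the checkpoint needs network access):
#     from transformers import NllbTokenizerFast
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#     enc = tok("Hello world")  # input_ids carry the eng_Latn language-code token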
| 346 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class snake_case__ ( lowerCamelCase__ ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , str ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
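# A hedged usage sketch of the reader above (`snake_case__` is the name this
# file gives the class; "lines.txt" is a placeholder path):
#     reader = snake_case__("lines.txt", split=NamedSplit("train"))
#     ds = reader.read()  # a Dataset with one "text" column, one row per line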
| 342 | import os
from typing import Dict, List, Tuple, TypeVar, Union
__snake_case = TypeVar('''T''')
__snake_case = Union[List[T], Tuple[T, ...]]
__snake_case = Union[T, List[T], Dict[str, T]]
__snake_case = Union[str, bytes, os.PathLike]
| 348 | 0 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs ( graph ):
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
SCREAMING_SNAKE_CASE__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
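# For contrast, an iterative BFS colouring that rejects an odd cycle as soon as
# it sees one (a sketch; equivalent in outcome to the DFS version above):
from collections import deque

def check_bipartite_bfs(graph):
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True

print(check_bipartite_bfs(graph))  # matches the DFS result: True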
| 46 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
__snake_case = '''▁'''
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Dict = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[str] = BigBirdTokenizer
__lowerCamelCase : Any = ["""input_ids""", """attention_mask"""]
__lowerCamelCase : List[int] = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
UpperCAmelCase : Optional[int] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
UpperCAmelCase : List[str] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
UpperCAmelCase : Union[str, Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
UpperCAmelCase : int =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
UpperCAmelCase : str =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : List[Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
UpperCAmelCase : Tuple =vocab_file
UpperCAmelCase : Optional[int] =False if not self.vocab_file else True
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : int =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Optional[int] =os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 348 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A (lowerCamelCase__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = ["""pixel_values"""]
def __init__( self : Dict , __lowerCAmelCase : Optional[Any] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Union[str, Any] = PILImageResampling.BICUBIC , __lowerCAmelCase : Union[str, Any] = True , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : Dict = True , __lowerCAmelCase : Dict = 1 / 2_55 , __lowerCAmelCase : Dict = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Optional[int] = True , **__lowerCAmelCase : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**snake_case__ )
A__ = size if size is not None else {'''shortest_edge''': 2_24}
A__ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
A__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
A__ = get_size_dict(snake_case__ , default_to_square=snake_case__ , param_name="""crop_size""" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale`, e.g. 1/255 to map uint8 images into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image channel-wise with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Apply the full preprocessing pipeline and return a `BatchFeature` holding `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
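
# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# Assumes these methods belong to a CLIP-style image processor; the checkpoint
# name below is an assumption chosen only to make the example concrete.
#
#     from PIL import Image
#     from transformers import CLIPImageProcessor
#
#     processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
#     print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224)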
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the square [-1, 1]^2."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Estimate the integral of `function_to_integrate` on [min_value, max_value] by uniform sampling."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose exact integral is (max^2 - min^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2] (a quarter circle of radius 2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
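
# --- Hedged usage sketch (added; iteration counts are illustrative assumptions) ---
# Monte Carlo error shrinks roughly like 1/sqrt(n), so more samples help:
#
#     pi_estimator(100_000)                        # estimate of pi near 3.14
#     area_under_line_estimator_check(100_000)     # exact area of y=x on [0, 1] is 0.5
#     pi_estimator_using_area_under_curve(100_000)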
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained("runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
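
# --- Hedged usage sketch (added; not part of the original test file) ---
# Minimal CPU-only inpainting call, assuming the same ONNX checkpoint as in the
# tests above; the provider choice and step count are illustrative assumptions.
#
#     pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
#     )
#     out = pipe(prompt="A red cat", image=init_image, mask_image=mask_image,
#                num_inference_steps=10, output_type="np")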
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
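
# --- Hedged usage sketch (added; not part of the original tokenizer file) ---
# Shows the special-token layout built above: <s> A </s> for one sequence and
# <s> A </s> B </s> for a pair. The token ids below are placeholders.
#
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     ids = tok.build_inputs_with_special_tokens([10, 11])          # [cls, 10, 11, sep]
#     pair = tok.build_inputs_with_special_tokens([10], [20, 21])   # [cls, 10, sep, 20, 21, sep]
#     mask = tok.get_special_tokens_mask([10, 11])                  # [1, 0, 0, 1]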
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
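
# --- Hedged worked example (added): what `cosine_distance` computes ---
# Rows of the result are cosine similarities between each image embedding and
# each concept embedding, since both inputs are L2-normalized before the matmul.
#
#     emb_a = torch.tensor([[1.0, 0.0]])               # one "image" embedding
#     emb_b = torch.tensor([[1.0, 0.0], [0.0, 1.0]])   # two "concept" embeddings
#     cosine_distance(emb_a, emb_b)                    # tensor([[1., 0.]])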
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """`is_small_dataset` should compare the dataset size against datasets.config.IN_MEMORY_MAX_SIZE."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
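
# --- Hedged usage sketch (added): `is_small_dataset` outside the test ---
# With the default IN_MEMORY_MAX_SIZE of 0 the function always returns False;
# the 500 MiB cap below is an assumed value for illustration only.
#
#     import datasets.config
#     from datasets.utils.info_utils import is_small_dataset
#
#     datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20   # assumed cap
#     is_small_dataset(400 * 2**20)                      # True: 400 MiB < 500 MiB
#     is_small_dataset(600 * 2**20)                      # False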
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
import math


def res(x: int, y: int) -> float:
    """Return a monotone proxy for the size of x**y."""
    if 0 not in (x, y):
        # We use log10(x^y) = y * log10(x) to compare magnitudes without overflow.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: eight digits plus a checksum letter (dashes allowed)."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
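
# --- Worked example (added): the checksum behind the validator ---
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so:
#
#     is_spain_national_id("12345678Z")    # True
#     is_spain_national_id("12345678T")    # False (letter does not match)
#     is_spain_national_id("12345678-Z")   # True (dashes are stripped)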
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number left by appending `shift_amount` zero bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number right by dropping its `shift_amount` lowest bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement for negatives)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
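
# --- Worked examples (added) for the three shift flavours ---
#     logical_left_shift(5, 2)        # "0b10100"  (5 << 2 == 20)
#     logical_right_shift(20, 2)      # "0b101"    (20 >> 2 == 5)
#     arithmetic_right_shift(-8, 2)   # "0b11110"  (sign bit is replicated; -8 >> 2 == -2)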
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
a :Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
a :Union[str, Any] = []
a :Optional[int] = []
a :Tuple = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
a :Optional[int] = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
a :List[Any] = 0
for log in Path().glob("*.log"):
a :List[str] = 0
with open(log, "r") as f:
for line in f:
a :int = json.loads(line)
if line.get("nodeid", "") != "":
a :str = line["nodeid"]
if line.get("duration", None) is not None:
a :List[str] = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
a :List[Any] = []
log.unlink()
a :List[str] = ""
a :List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
a :int = []
a :Union[str, Any] = {}
for test in failed_tests:
a :List[str] = test[0].split("::")
a :int = data[0].split("/")[-1]
if data[0] not in filesafailed:
a :Dict = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
a :int = [test[0] for test in failed_table]
a :Optional[Any] = list(set(files))
# Count number of instances in failed_tests
a :Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
a :Dict = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
a :Any = "Too many failed tests, please see the full report in the Action results."
a :List[Any] = len(err) + 10
a :str = message[: 3_000 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
a :Tuple = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
a :Optional[Any] = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
a :Tuple = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
a :Any = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
a :Optional[int] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
a :List[str] = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
a :Any = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
a :Union[str, Any] = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
a :str = row[0]
else:
a :List[str] = ""
a :Optional[int] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K",
        "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z",
        "O", ".", "-", "<null_1>", "<mask>",
    )
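
# --- Hedged usage sketch (added; the concrete values are illustrative assumptions) ---
#     config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
#     config.hidden_size                               # 768 (default)
#     fold_cfg = EsmFoldConfig()                       # __post_init__ fills in a default TrunkConfig
#     fold_cfg.trunk.structure_module.sequence_dim     # 384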
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 254 | import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( lowerCamelCase__ ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
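
# A minimal sketch of the denoising loop the tests above exercise: every
# k-diffusion style scheduler in diffusers is driven through the same three calls
# (scale_model_input -> model -> step). `denoise` assumes any callable
# `model(sample, t)` returning a noise prediction; it is illustrative only and not
# part of the test suite.
def denoise(model, scheduler, sample):
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample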
| 348 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
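# Note: `_LazyModule` replaces this module in `sys.modules`, so the heavy torch/tf
# submodules listed above are imported only when one of their attributes is first
# accessed. A hedged sketch of the same idea with PEP 562 module-level __getattr__
# (the real _LazyModule is richer than this):
#
#   def __getattr__(name):
#       import importlib
#       for module_name, attrs in _import_structure.items():
#           if name in attrs:
#               return getattr(importlib.import_module("." + module_name, __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")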
| 346 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
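
    # Note on the score above: optax.softmax_cross_entropy returns the per-position
    # loss, so with a batch of one, loss.mean() * labels.shape[-1] is the summed
    # negative log-likelihood of the label sequence. Equivalent sketch:
    #
    #   per_token_loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1]))
    #   mtf_score = -per_token_loss.sum().item()  # == -(labels.shape[-1] * per_token_loss.mean().item())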
| 348 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """Convert the original Bort checkpoint (GluonNLP/MXNet) to a HuggingFace PyTorch checkpoint."""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do NOT output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 342 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
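
# The provider tuple and session options used above are plain onnxruntime
# configuration. A hedged sketch of wiring the same settings into an
# InferenceSession directly ("model.onnx" is a placeholder path):
#
#   sess_options = ort.SessionOptions()
#   sess_options.enable_mem_pattern = False
#   session = ort.InferenceSession(
#       "model.onnx",
#       sess_options=sess_options,
#       providers=[
#           ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"}),
#           "CPUExecutionProvider",
#       ],
#   )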
| 348 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
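
# Example invocation (checkpoint names are illustrative; any Flax vision encoder
# and causal-LM decoder on the Hub should work):
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2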
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
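
# A minimal feature-extraction sketch with the same checkpoint (standard
# transformers API, assuming `from transformers import AutoTokenizer`; the hidden
# size of 768 matches the shape assertion above):
#
#   tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
#   model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
#   inputs = tokenizer("Hello, world!", return_tensors="tf")
#   last_hidden_state = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)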
| 349 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
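
# For large files, a streamed variant avoids buffering the whole video in memory
# (hedged sketch; same downloadgram endpoint assumption as above):
#
#   def download_video_streamed(url: str, file_name: str, chunk_size: int = 1 << 20) -> None:
#       base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
#       video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
#       with requests.get(video_url, stream=True) as resp, open(file_name, "wb") as fp:
#           for chunk in resp.iter_content(chunk_size=chunk_size):
#               fp.write(chunk)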
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, return the closest vector in dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
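
# Usage sketch (toy values):
#
#   dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
#   value_array = np.array([[0.0, 0.0, 0.1]])
#   similarity_search(dataset, value_array)   # -> [[[0.0, 0.0, 0.0], 0.1]]
#   cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0]))  # -> 1.0 (parallel vectors)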
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
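
# Quick sanity check (illustrative coordinates): San Francisco (37.774856, -122.424227)
# to Yosemite (37.864742, -119.537521) should come out close to 254 km:
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)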
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
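
# Usage sketch: simulate a dead network and assert that client code fails fast
# instead of hanging (illustrative):
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.ConnectionError):
#           requests.get("https://huggingface.co")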
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__ ( unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 349 |
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
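
# Usage sketch:
#
#   to_pascal_case("hello world")              # 'HelloWorld'
#   to_camel_case("hello world")               # 'helloWorld'
#   to_snake_case("hello world", upper=True)   # 'HELLO_WORLD'
#   to_kebab_case("hello world", upper=False)  # 'hello-world'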
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
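
# Typical invocation through the transformers CLI (model name is illustrative):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force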
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 1 |
'''simple docstring'''
def _lowercase ( __A ,__A ,__A = 0 ,__A = 0 ):
'''simple docstring'''
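# Recursive two-pointer search: compare the key against both ends of the
# window [left, right], then shrink the window by one element from each side.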
__UpperCamelCase = right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A ,__A ,left + 1 ,right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__ ( metaclass=UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''flax''']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _lowercase ( __A ):
'''simple docstring'''
create_state_space_tree(__A ,[] ,0 )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
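# Depth-first walk of the inclusion/exclusion tree: recurse once without
# sequence[index], then append it, recurse again, and backtrack with pop().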
if index == len(__A ):
print(__A )
return
create_state_space_tree(__A ,__A ,index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__A ,__A ,index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
a__ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase__ ( logging.LoggerAdapter):
@staticmethod
def __lowerCamelCase ( lowercase ) -> Dict:
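# Log on every process when main_process_only is False; otherwise only on the main process.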
__UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase )
__UpperCamelCase = kwargs.pop("""in_order""" , lowercase )
if self.isEnabledFor(lowercase ):
if self._should_log(lowercase ):
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
elif in_order:
__UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
state.wait_for_everyone()
def _lowercase ( __A ,__A = None ):
'''simple docstring'''
if log_level is None:
__UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A )
__UpperCamelCase = logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A ,{} )
| 349 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = LEDTokenizer
__SCREAMING_SNAKE_CASE = LEDTokenizerFast
__SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ( self ) -> Any:
super().setUp()
__UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCamelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__UpperCamelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCamelCase = {"""unk_token""": """<unk>"""}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def __lowerCamelCase ( self , **lowercase ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def __lowerCamelCase ( self , **lowercase ) -> Any:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def __lowerCamelCase ( self , lowercase ) -> Dict:
return "lower newer", "lower newer"
@cached_property
def __lowerCamelCase ( self ) -> Any:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def __lowerCamelCase ( self ) -> int:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCamelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowercase , max_length=len(lowercase ) , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase , lowercase )
@require_torch
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertNotIn("""labels""" , lowercase )
self.assertNotIn("""decoder_attention_mask""" , lowercase )
@require_torch
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(text_target=lowercase , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def __lowerCamelCase ( self ) -> Optional[int]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=lowercase , truncation=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = ["""A long paragraph for summarization."""]
__UpperCamelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = tokenizer(lowercase , return_tensors="""pt""" )
__UpperCamelCase = tokenizer(text_target=lowercase , return_tensors="""pt""" )
__UpperCamelCase = inputs["""input_ids"""]
__UpperCamelCase = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCamelCase ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase = ["""Summary of the text.""", """Another summary."""]
__UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCamelCase = tokenizer(lowercase , padding=lowercase )
__UpperCamelCase = [[0] * len(lowercase ) for x in encoded_output["""input_ids"""]]
__UpperCamelCase = tokenizer.pad(lowercase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowercase )
def __lowerCamelCase ( self ) -> Dict:
pass
def __lowerCamelCase ( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = """A, <mask> AllenNLP sentence."""
__UpperCamelCase = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
__UpperCamelCase = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[Any] = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Any:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str:
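# Build the RagRetriever lazily, once per Ray actor; subsequent calls are no-ops.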
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Dict:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 349 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , lowercase = None ) -> List[str]:
__UpperCamelCase = (
os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__UpperCamelCase = Extractor
def __lowerCamelCase ( self , lowercase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__UpperCamelCase = os.path.abspath(lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> bool:
return force_extract or (
not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase ))
)
def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str:
__UpperCamelCase = self.extractor.infer_extractor_format(lowercase )
if not extractor_format:
return input_path
__UpperCamelCase = self._get_output_path(lowercase )
if self._do_extract(lowercase , lowercase ):
self.extractor.extract(lowercase , lowercase , lowercase )
return output_path
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
...
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = []
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> int:
with open(lowercase , """rb""" ) as f:
return f.read(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if not magic_number:
__UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers )
try:
__UpperCamelCase = cls.read_magic_number(lowercase , lowercase )
except OSError:
return False
return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
return tarfile.is_tarfile(lowercase )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
def resolved(lowercase ) -> str:
return os.path.realpath(os.path.abspath(lowercase ) )
def badpath(lowercase , lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase )
def badlink(lowercase , lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
__UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase )
__UpperCamelCase = resolved(lowercase )
for finfo in members:
if badpath(finfo.name , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = tarfile.open(lowercase )
tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x1F\x8B''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with gzip.open(lowercase , """rb""" ) as gzip_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if super().is_extractable(lowercase , magic_number=lowercase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
__UpperCamelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be
if len(lowercase ) == sizeCentralDir:
__UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
with zipfile.ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with lzma.open(lowercase ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = rarfile.RarFile(lowercase )
rf.extractall(lowercase )
rf.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__UpperCamelCase = zstd.ZstdDecompressor()
with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh:
dctx.copy_stream(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with bz2.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(lowercase , exist_ok=lowercase )
with py7zr.SevenZipFile(lowercase , """r""" ) as archive:
archive.extractall(lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ :
# Put the zip extractor last, because files can be wrongly detected as zip (e.g. when they are actually tar or gzip)
__SCREAMING_SNAKE_CASE = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ) -> Union[str, Any]:
return max(
len(lowercase )
for extractor in cls.extractors.values()
if issubclass(lowercase , lowercase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = cls.infer_extractor_format(lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/>
__UpperCamelCase = cls._get_magic_number_max_length()
__UpperCamelCase = cls._read_magic_number(lowercase , lowercase )
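# Dispatch on the file's magic number: the first registered extractor whose
# pattern matches the prefix wins, so registration order matters.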
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase , magic_number=lowercase ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase )
# Prevent parallel extractions
__UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) )
with FileLock(lowercase ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format
else:
__UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(lowercase , lowercase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase ):
return extractor.extract(lowercase , lowercase )
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
a__ : Optional[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
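# Keep the fast tokenizer's normalizer in sync with the Python-level options passed in.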
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> Tuple:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 349 | 1 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
a__ : Any = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
a__ , a__ : List[str] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
a__ : Union[str, Any] = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
a__ : Optional[Any] = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
a__ : Any = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : Tuple = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''bert'''
def __init__( self , lowercase=3_0_5_2_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Dict:
super().__init__(pad_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ : Union[str, Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''albert'''
def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=__A ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=__A ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=__A )
return parser.parse_args()
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = parse_args()
# Import training_script as a module.
__UpperCamelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__UpperCamelCase = script_fpath.stem
__UpperCamelCase = importlib.import_module(__A )
# Patch sys.argv
__UpperCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 349 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowercase ( __A ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = XGBRegressor(verbosity=0 ,random_state=42 )
xgb.fit(__A ,__A )
# Predict target for test data
__UpperCamelCase = xgb.predict(__A )
__UpperCamelCase = predictions.reshape(len(__A ) ,1 )
return predictions
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = fetch_california_housing()
__UpperCamelCase , __UpperCamelCase = data_handling(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = train_test_split(
__A ,__A ,test_size=0.25 ,random_state=1 )
__UpperCamelCase = xgboost(__A ,__A ,__A )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(__A ,__A )}" )
print(f"Mean Square Error : {mean_squared_error(__A ,__A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 349 | 1 |
'''simple docstring'''
import math
def _lowercase ( __A ,__A ):
'''simple docstring'''
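# Jump search on a sorted array: scan forward in blocks of size sqrt(n), then
# walk linearly within the block that could contain x.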
__UpperCamelCase = len(__A )
__UpperCamelCase = int(math.floor(math.sqrt(__A ) ) )
__UpperCamelCase = 0
while arr[min(__A ,__A ) - 1] < x:
__UpperCamelCase = step
step += int(math.floor(math.sqrt(__A ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__UpperCamelCase = prev + 1
if prev == min(__A ,__A ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
a__ : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
a__ : Dict = [int(item) for item in user_input.split(',')]
a__ : Optional[Any] = int(input('Enter the number to be searched:\n'))
a__ : List[str] = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f'''Number {x} is at index {res}''')
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder()
__UpperCamelCase = inputs_dict["""input_ids"""]
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict["""attention_mask"""][:1, :]
__UpperCamelCase = inputs_dict["""head_mask"""]
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention mask
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(lowercase , attention_mask=lowercase )[0]
__UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.int8 )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.int8 ),
] ,axis=-1 ,)
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__SCREAMING_SNAKE_CASE = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__SCREAMING_SNAKE_CASE = '''google/pegasus-xsum'''
@cached_property
def __lowerCamelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.translate_src_text(**lowercase )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )
return generated_words
@slow
def __lowerCamelCase ( self ) -> Dict:
self._assert_generated_batch_equal_expected()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : Optional[int] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import string
def _lowercase ( __A ):
'''simple docstring'''
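# brute force: try every possible Caesar shift (one per letter of the alphabet)
# and print each candidate plaintext so the reader can spot the real one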
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(__A )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = input("""Encrypted message: """ )
__UpperCamelCase = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_0 , lowercase=0.02 , lowercase=True , lowercase=None , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = use_labels
__UpperCamelCase = scope
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self ) -> str:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowercase , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = True
__UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , **lowercase , ) -> Union[str, Any]:
__UpperCamelCase = BertGenerationEncoder(config=lowercase )
model.to(lowercase )
model.eval()
__UpperCamelCase = model(lowercase , attention_mask=lowercase )
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ) -> Any:
__UpperCamelCase = True
__UpperCamelCase = BertGenerationEncoder(config=lowercase )
model.to(lowercase )
model.eval()
__UpperCamelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , )
__UpperCamelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ) -> Tuple:
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = BertGenerationDecoder(config=lowercase ).to(lowercase ).eval()
# first forward pass
__UpperCamelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , use_cache=lowercase , )
__UpperCamelCase = outputs.past_key_values
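# with the cached keys/values, the second forward pass only has to process the new
# tokens; its hidden states must match the full-sequence pass on the overlapping slice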
# create multiple hypothetical next tokens and extend next_input_ids with them
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
__UpperCamelCase = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , past_key_values=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
# select random slice
__UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , *lowercase , ) -> Tuple:
__UpperCamelCase = BertGenerationDecoder(lowercase )
model.to(lowercase )
model.eval()
__UpperCamelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (BertGenerationDecoder,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = BertGenerationEncoderTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , hidden_size=3_7 )
def __lowerCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = """bert"""
self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
__UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowercase )
@slow
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowercase )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
__UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
__UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Dict = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''gptj'''
__SCREAMING_SNAKE_CASE = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple:
__UpperCamelCase = vocab_size
__UpperCamelCase = n_positions
__UpperCamelCase = n_embd
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
__UpperCamelCase = n_inner
__UpperCamelCase = rotary_dim
__UpperCamelCase = activation_function
__UpperCamelCase = resid_pdrop
__UpperCamelCase = embd_pdrop
__UpperCamelCase = attn_pdrop
__UpperCamelCase = layer_norm_epsilon
__UpperCamelCase = initializer_range
__UpperCamelCase = use_cache
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
super().__init__(
bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]:
super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase )
if not getattr(self._config , """pad_token_id""" , lowercase ):
# TODO: how to do that better?
__UpperCamelCase = 0
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
__UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="""inputs""" )
__UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_layer
@property
def __lowerCamelCase ( self ) -> int:
return self._config.n_head
def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase = super(lowercase , self ).generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
# We need to order the inputs in the way they appear in the forward()
__UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__UpperCamelCase = seqlen + 2
__UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
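# one (key, value) pair of zeros per layer, each of shape
# (batch, num_heads, past_sequence_length, head_dim)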
__UpperCamelCase = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers )
]
__UpperCamelCase = common_inputs["""attention_mask"""]
if self.use_past:
__UpperCamelCase = ordered_inputs["""attention_mask"""].dtype
__UpperCamelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self ) -> int:
return 1_3
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_)
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True})
__SCREAMING_SNAKE_CASE = Features({'''text''': Value('''string''')})
__SCREAMING_SNAKE_CASE = Features({})
__SCREAMING_SNAKE_CASE = "text"
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ : int = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['LayoutLMv3FeatureExtractor']
a__ : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmv3 import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMv3Config,
LayoutLMv3OnnxConfig,
)
from .processing_layoutlmv3 import LayoutLMv3Processor
from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv3 import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv3ForQuestionAnswering,
LayoutLMv3ForSequenceClassification,
LayoutLMv3ForTokenClassification,
LayoutLMv3Model,
LayoutLMv3PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmv3 import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMv3ForQuestionAnswering,
TFLayoutLMv3ForSequenceClassification,
TFLayoutLMv3ForTokenClassification,
TFLayoutLMv3Model,
TFLayoutLMv3PreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''mobilenet_v1'''
def __init__( self , lowercase=3 , lowercase=2_2_4 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.999 , lowercase=0.02 , lowercase=0.001 , **lowercase , ) -> List[Any]:
super().__init__(**lowercase )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
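# depth_multiplier scales the channel count of every layer; it is the "1.0" / "0.75"
# in checkpoint names such as mobilenet_v1_1.0_224 above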
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = depth_multiplier
__UpperCamelCase = min_depth
__UpperCamelCase = hidden_act
__UpperCamelCase = tf_padding
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = version.parse('''1.11''')
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __lowerCamelCase ( self ) -> float:
return 1E-4
'''simple docstring'''
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = len(__A )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
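# subset[i][j] is True when some subset of the first i elements sums to j;
# each cell either skips arr[i - 1] or, if it fits, includes it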
# a required sum of zero can always be formed by taking no elements,
# hence True for every prefix length
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 ,required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = int(__A )
# Initialize Result
__UpperCamelCase = []
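# greedy strategy: always take the largest denomination that still fits; this is
# optimal for canonical coin systems (like the INR denominations below) but not
# for arbitrary ones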
# Traverse through all denomination
for denomination in reversed(__A ):
# Find denominations
while int(__A ) >= int(__A ):
total_value -= int(__A )
answer.append(__A ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
a__ : int = []
a__ : List[Any] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
a__ : Tuple = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
a__ : Dict = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
a__ : str = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
a__ : List[str] = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f'''Following is minimal change for {value}: ''')
a__ : str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , lowercase = None ) -> List[str]:
__UpperCamelCase = (
os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__UpperCamelCase = Extractor
def __lowerCamelCase ( self , lowercase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
__UpperCamelCase = os.path.abspath(lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> bool:
return force_extract or (
not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase ))
)
def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str:
__UpperCamelCase = self.extractor.infer_extractor_format(lowercase )
if not extractor_format:
return input_path
__UpperCamelCase = self._get_output_path(lowercase )
if self._do_extract(lowercase , lowercase ):
self.extractor.extract(lowercase , lowercase , lowercase )
return output_path
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
...
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = []
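# subclasses list the leading "magic number" byte signatures of their format;
# is_extractable reads just enough of the file header to compare against them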
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> int:
with open(lowercase , """rb""" ) as f:
return f.read(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if not magic_number:
__UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers )
try:
__UpperCamelCase = cls.read_magic_number(lowercase , lowercase )
except OSError:
return False
return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
return tarfile.is_tarfile(lowercase )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
def resolved(lowercase ) -> str:
return os.path.realpath(os.path.abspath(lowercase ) )
def badpath(lowercase , lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase )
def badlink(lowercase , lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
__UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase )
__UpperCamelCase = resolved(lowercase )
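# filter out members that would escape the extraction directory
# (path traversal via "../" entries or sym/hard links, a.k.a. tar-slip)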
for finfo in members:
if badpath(finfo.name , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = tarfile.open(lowercase )
tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x1F\x8B''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with gzip.open(lowercase , """rb""" ) as gzip_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if super().is_extractable(lowercase , magic_number=lowercase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
__UpperCamelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be
if len(lowercase ) == sizeCentralDir:
__UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
with zipfile.ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with lzma.open(lowercase ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = rarfile.RarFile(lowercase )
rf.extractall(lowercase )
rf.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__UpperCamelCase = zstd.ZstdDecompressor()
with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh:
dctx.copy_stream(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with bz2.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(lowercase , exist_ok=lowercase )
with py7zr.SevenZipFile(lowercase , """r""" ) as archive:
archive.extractall(lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ :
# Put zip last: other formats (e.g. tar or gzip) can be wrongly detected as zip, so try them first
__SCREAMING_SNAKE_CASE = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ) -> Union[str, Any]:
return max(
len(lowercase )
for extractor in cls.extractors.values()
if issubclass(lowercase , lowercase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = cls.infer_extractor_format(lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/>
__UpperCamelCase = cls._get_magic_number_max_length()
__UpperCamelCase = cls._read_magic_number(lowercase , lowercase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase , magic_number=lowercase ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase )
# Prevent parallel extractions
__UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) )
with FileLock(lowercase ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format
else:
__UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(lowercase , lowercase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase ):
return extractor.extract(lowercase , lowercase )
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCAmelCase__ :
def __init__( self , lowercase ) -> str:
if isinstance(lowercase , lowercase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__UpperCamelCase = deepcopy(lowercase )
elif os.path.exists(lowercase ):
with io.open(lowercase , """r""" , encoding="""utf-8""" ) as f:
__UpperCamelCase = json.load(lowercase )
else:
try:
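# the config may also arrive as a base64-encoded JSON string (e.g. when it is
# passed through a command line); decode and parse it as a last resort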
__UpperCamelCase = base64.urlsafe_b64decode(lowercase ).decode("""utf-8""" )
__UpperCamelCase = json.loads(lowercase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" )
__UpperCamelCase = config
self.set_stage_and_offload()
def __lowerCamelCase ( self ) -> Dict:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__UpperCamelCase = self.get_value("""zero_optimization.stage""" , -1 )
# offload
__UpperCamelCase = False
if self.is_zeroa() or self.is_zeroa():
__UpperCamelCase = set(["""cpu""", """nvme"""] )
__UpperCamelCase = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__UpperCamelCase = True
def __lowerCamelCase ( self , lowercase ) -> int:
__UpperCamelCase = self.config
# find the config node of interest if it exists
__UpperCamelCase = ds_key_long.split(""".""" )
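# walk the nested config following the dot-separated key, e.g.
# "zero_optimization.offload_param.device" -> nodes ["zero_optimization", "offload_param"], leaf "device"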
__UpperCamelCase = nodes.pop()
for node in nodes:
__UpperCamelCase = config.get(lowercase )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> int:
__UpperCamelCase , __UpperCamelCase = self.find_config_node(lowercase )
if config is None:
return default
return config.get(lowercase , lowercase )
def __lowerCamelCase ( self , lowercase , lowercase=False ) -> Dict:
__UpperCamelCase = self.config
# find the config node of interest if it exists
__UpperCamelCase = ds_key_long.split(""".""" )
for node in nodes:
__UpperCamelCase = config
__UpperCamelCase = config.get(lowercase )
if config is None:
if must_exist:
raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase )
def __lowerCamelCase ( self , lowercase ) -> str:
__UpperCamelCase = self.get_value(lowercase )
return False if value is None else bool(lowercase )
def __lowerCamelCase ( self , lowercase ) -> Any:
__UpperCamelCase = self.get_value(lowercase )
return False if value is None else not bool(lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
return self._stage == 2
def __lowerCamelCase ( self ) -> List[Any]:
return self._stage == 3
def __lowerCamelCase ( self ) -> List[str]:
return self._offload
class UpperCAmelCase__ :
def __init__( self , lowercase ) -> Tuple:
__UpperCamelCase = engine
def __lowerCamelCase ( self , lowercase , **lowercase ) -> Union[str, Any]:
# runs backpropagation and handles mixed precision
self.engine.backward(lowercase , **lowercase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase ) -> Union[str, Any]:
super().__init__(lowercase , device_placement=lowercase , scaler=lowercase )
__UpperCamelCase = hasattr(self.optimizer , """overflow""" )
def __lowerCamelCase ( self , lowercase=None ) -> str:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCamelCase ( self ) -> Tuple:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCamelCase ( self ) -> List[Any]:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase ) -> Optional[Any]:
super().__init__(lowercase , lowercase )
def __lowerCamelCase ( self ) -> int:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase=0.001 , lowercase=0 , **lowercase ) -> List[Any]:
__UpperCamelCase = params
__UpperCamelCase = lr
__UpperCamelCase = weight_decay
__UpperCamelCase = kwargs
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase=None , lowercase=0 , **lowercase ) -> Optional[int]:
__UpperCamelCase = optimizer
__UpperCamelCase = total_num_steps
__UpperCamelCase = warmup_num_steps
__UpperCamelCase = kwargs
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
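# pre-allocate the autoregressive cache, decode all but the last token in one pass,
# then decode the final token incrementally; the result must match the uncached forward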
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.not_equal(__A ,config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
__UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
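# A self-contained sketch, with readable names, of what the mask-building
# helper above computes; pad_token_id=0 and the token values are assumptions.
def _sketch_build_masks():
    import numpy as np

    pad_token_id = 0
    input_ids = np.array([[5, 6, 7, 0]])
    decoder_input_ids = np.array([[2, 8, 9, 0]])
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # always attend to BOS
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask  # both [[1, 1, 1, 0]]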
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 349 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[str] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''segformer'''
def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[3_2, 6_4, 1_6_0, 2_5_6] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=0.1 , lowercase=1E-6 , lowercase=2_5_6 , lowercase=2_5_5 , **lowercase , ) -> Tuple:
super().__init__(**lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , lowercase , )
__UpperCamelCase = num_channels
__UpperCamelCase = num_encoder_blocks
__UpperCamelCase = depths
__UpperCamelCase = sr_ratios
__UpperCamelCase = hidden_sizes
__UpperCamelCase = patch_sizes
__UpperCamelCase = strides
__UpperCamelCase = mlp_ratios
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = drop_path_rate
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = decoder_hidden_size
__UpperCamelCase = kwargs.get("""reshape_last_stage""" , lowercase )
__UpperCamelCase = semantic_loss_ignore_index
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = version.parse('''1.11''')
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCamelCase ( self ) -> float:
return 1E-4
@property
def __lowerCamelCase ( self ) -> int:
return 1_2
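# A sanity sketch of how the stage strides declared above shrink the spatial
# resolution: with strides [4, 2, 2, 2] a 512x512 input ends at 16x16 features
# (overall stride 32). The function name is made up for illustration.
def _sketch_segformer_stage_resolutions(input_size=512, strides=(4, 2, 2, 2)):
    sizes, size = [], input_size
    for stride in strides:
        size //= stride
        sizes.append(size)
    return sizes  # [128, 64, 32, 16]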
| 349 |
'''simple docstring'''
import pytest
a__ : List[str] = '__dummy_dataset1__'
a__ : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = dataset_loading_script_name
__UpperCamelCase = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=__A )
__UpperCamelCase = script_dir / f"{script_name}.py"
with open(__A ,"""w""" ) as f:
f.write(__A )
return str(__A )
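# A sketch of how a test would consume the fixture above; shown as a comment
# because the local names are obfuscated (upstream the fixture is called
# `dataset_loading_script_dir`):
#
#   def test_script_was_written(dataset_loading_script_dir):
#       import os
#       assert os.path.exists(dataset_loading_script_dir)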
| 349 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
__UpperCamelCase = [2, 4, 6, 8, 1_0, 1_2]
__UpperCamelCase = 1_0_0
self.assertEqual(kp.calc_profit(lowercase , lowercase , lowercase ) , 2_1_0 )
def __lowerCamelCase ( self ) -> str:
self.assertRaisesRegex(lowercase , """max_weight must greater than zero.""" )
def __lowerCamelCase ( self ) -> str:
self.assertRaisesRegex(lowercase , """Weight can not be negative.""" )
def __lowerCamelCase ( self ) -> Any:
self.assertRaisesRegex(lowercase , """Profit can not be negative.""" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
self.assertRaisesRegex(lowercase , """max_weight must greater than zero.""" )
def __lowerCamelCase ( self ) -> Dict:
self.assertRaisesRegex(
lowercase , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
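# A self-contained sketch of the greedy fractional-knapsack profit that the
# first test above expects (every profit/weight ratio is 5, all items fit in
# a capacity of 100, so the profit is 210). The implementation below is an
# assumption about what `kp.calc_profit` does, inferred from the test values.
def _sketch_calc_profit(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if capacity <= 0:
            break
        take = min(w, capacity)  # take the whole item, or whatever capacity remains
        total += p * (take / w)
        capacity -= take
    return total  # _sketch_calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) -> 210.0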
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a__ : Any = logging.get_logger(__name__)
a__ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
a__ : List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = {}
with open(__A ,"""r""" ) as file:
for line_number, line in enumerate(__A ):
__UpperCamelCase = line.strip()
if line:
__UpperCamelCase = line.split()
__UpperCamelCase = line_number
__UpperCamelCase = words[0]
__UpperCamelCase = value
return result
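# A tiny round trip showing what the parser above produces: it maps each
# non-empty line's number to that line's first token, i.e. an id->label
# mapping for the sequence-classification branch below. The file contents
# here are invented for the demo.
def _sketch_read_txt_into_dict():
    import io

    fake_file = io.StringIO("happy 1\nsad 2\n\nneutral 3\n")
    result = {}
    for line_number, line in enumerate(fake_file):
        line = line.strip()
        if line:
            result[line_number] = line.split()[0]
    return result  # {0: 'happy', 1: 'sad', 3: 'neutral'}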
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(__A ,__A ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = """.""".join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if """lm_head""" in full_key else value[0]
a__ : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase ( __A ,__A ,__A=None ,__A=None ):
'''simple docstring'''
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__A )[0].split(""".""" )[-2]
__UpperCamelCase = mapped_key.replace("""*""" ,__A )
if "weight_g" in name:
__UpperCamelCase = """weight_g"""
elif "weight_v" in name:
__UpperCamelCase = """weight_v"""
elif "bias" in name:
__UpperCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = """weight"""
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(__A ,__A ,__A ,__A ,__A )
else:
set_recursively(__A ,__A ,__A ,__A ,__A )
return is_used
return is_used
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,)
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
__UpperCamelCase = name.split(""".""" )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__A )
@torch.no_grad()
def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__A )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(__A )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(__A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(__A ,"""vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A ,exist_ok=__A )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(__A ,__A )
__UpperCamelCase = WavaVecaCTCTokenizer(
__A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,)
__UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
__UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A )
processor.save_pretrained(__A )
__UpperCamelCase = WavaVecaForCTC(__A )
else:
__UpperCamelCase = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__UpperCamelCase = fairseq.tasks.setup_task(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A )
__UpperCamelCase = model[0].eval()
recursively_load_weights(__A ,__A ,not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a__ : Optional[int] = parser.parse_args()
a__ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 349 | 1 |
'''simple docstring'''
import copy
import re
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = '''hp'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = None
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase ) -> Dict:
__UpperCamelCase = prefix
__UpperCamelCase = defaults
cls.build_naming_info()
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> Optional[Any]:
if len(lowercase ) == 0:
return ""
__UpperCamelCase = None
if any(char.isdigit() for char in word ):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowercase ) + 1 ):
__UpperCamelCase = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__UpperCamelCase = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowercase ):
__UpperCamelCase = """"""
while integer != 0:
__UpperCamelCase = chr(ord("""A""" ) + integer % 1_0 ) + s
integer //= 1_0
return s
__UpperCamelCase = 0
while True:
__UpperCamelCase = word + """#""" + int_to_alphabetic(lowercase )
if sword in info["reverse_short_word"]:
continue
else:
__UpperCamelCase = sword
break
__UpperCamelCase = short_word
__UpperCamelCase = word
return short_word
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = param_name.split("""_""" )
__UpperCamelCase = [TrialShortNamer.shortname_for_word(lowercase , lowercase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__UpperCamelCase = ["""""", """_"""]
for separator in separators:
__UpperCamelCase = separator.join(lowercase )
if shortname not in info["reverse_short_param"]:
__UpperCamelCase = shortname
__UpperCamelCase = param_name
return shortname
return param_name
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TrialShortNamer.shortname_for_key(lowercase , lowercase )
__UpperCamelCase = short_name
__UpperCamelCase = param_name
@classmethod
def __lowerCamelCase ( cls ) -> Any:
if cls.NAMING_INFO is not None:
return
__UpperCamelCase = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
__UpperCamelCase = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowercase , lowercase )
__UpperCamelCase = info
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str:
cls.build_naming_info()
assert cls.PREFIX is not None
__UpperCamelCase = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__UpperCamelCase = cls.NAMING_INFO["""short_param"""][k]
if isinstance(lowercase , lowercase ):
__UpperCamelCase = 1 if v else 0
__UpperCamelCase = """""" if isinstance(lowercase , (int, float) ) else """-"""
__UpperCamelCase = f"{key}{sep}{v}"
name.append(lowercase )
return "_".join(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> List[str]:
__UpperCamelCase = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__UpperCamelCase = []
else:
__UpperCamelCase = repr.split("""_""" )
__UpperCamelCase = {}
for value in values:
if "-" in value:
__UpperCamelCase , __UpperCamelCase = value.split("""-""" )
else:
__UpperCamelCase = re.sub("""[0-9.]""" , """""" , lowercase )
__UpperCamelCase = float(re.sub("""[^0-9.]""" , """""" , lowercase ) )
__UpperCamelCase = cls.NAMING_INFO["""reverse_short_param"""][p_k]
__UpperCamelCase = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__UpperCamelCase = cls.DEFAULTS[k]
return parameters
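# The core idea of the namer above, in miniature: shorten each hyperparameter
# word to its shortest prefix that does not collide with one already taken.
# The helper name and inputs are assumptions made for illustration.
def _sketch_shortname(params):
    taken = {}
    for word in params:
        for n in range(1, len(word) + 1):
            if word[:n] not in taken.values():
                taken[word] = word[:n]
                break
    return taken  # _sketch_shortname(["learning_rate", "layers"]) -> {'learning_rate': 'l', 'layers': 'la'}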
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
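# The integration test above pins a 3x3 slice of the first hidden state
# against hard-coded values. The same guard in miniature; the expected values
# below are invented, only the pattern is the point.
def _sketch_slice_check(output):
    import tensorflow as tf

    expected = tf.constant([[[0.19, -0.13, 0.41]]])
    tf.debugging.assert_near(output[:, :1, :3], expected, atol=1E-4)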
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ : Optional[int] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ['ConditionalDetrFeatureExtractor']
a__ : List[str] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
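# A minimal sketch of the deferred-import pattern that _LazyModule implements,
# using module-level __getattr__ (PEP 562) instead of the transformers
# machinery; the mapping below is an invented example.
def _sketch_lazy_getattr(name):
    import importlib

    lazy_targets = {"np": "numpy"}
    if name in lazy_targets:
        return importlib.import_module(lazy_targets[name])
    raise AttributeError(name)
# Bound as `__getattr__ = _sketch_lazy_getattr` in a module, `mod.np` would
# import numpy only on first attribute access.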
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowercase ( __A ,__A ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__A ,__A ) ) )
def _lowercase ( __A ,__A ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__UpperCamelCase = (
"""Wrong input data's dimensions... """
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(__A )
try:
if dataset.shape[1] != value_array.shape[1]:
__UpperCamelCase = (
"""Wrong input data's shape... """
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(__A )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
__UpperCamelCase = (
"""Input data have different datatype... """
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(__A )
__UpperCamelCase = []
for value in value_array:
__UpperCamelCase = euclidean(__A ,dataset[0] )
__UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
__UpperCamelCase = euclidean(__A ,__A )
if dist > temp_dist:
__UpperCamelCase = temp_dist
__UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowercase ( __A ,__A ):
'''simple docstring'''
return np.dot(__A ,__A ) / (norm(__A ) * norm(__A ))
if __name__ == "__main__":
import doctest
doctest.testmod()
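# A tiny end-to-end sketch of the two ideas above (nearest neighbour by
# Euclidean distance, then cosine similarity); the dataset and query values
# are invented for the demo.
def _sketch_similarity_usage():
    import numpy as np

    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    query = np.array([0.9, 1.1])
    nearest = min(dataset, key=lambda row: np.linalg.norm(row - query))  # -> [1.0, 1.0]
    cosine = np.dot(query, nearest) / (np.linalg.norm(query) * np.linalg.norm(nearest))
    return nearest.tolist(), float(cosine)  # ([1.0, 1.0], ~0.995)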
| 349 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = (DDPMParallelScheduler,)
def __lowerCamelCase ( self , **lowercase ) -> List[str]:
__UpperCamelCase = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**lowercase )
return config
def __lowerCamelCase ( self ) -> List[Any]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def __lowerCamelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def __lowerCamelCase ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def __lowerCamelCase ( self ) -> int:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def __lowerCamelCase ( self ) -> List[str]:
self.check_over_configs(thresholding=lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , )
def __lowerCamelCase ( self ) -> Dict:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase )
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = len(lowercase )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = self.dummy_sample_deter + 0.1
__UpperCamelCase = self.dummy_sample_deter - 0.1
__UpperCamelCase = samplea.shape[0]
__UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
__UpperCamelCase = torch.arange(lowercase )[0:3, None].repeat(1 , lowercase )
__UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__UpperCamelCase = scheduler.batch_step_no_noise(lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__UpperCamelCase = torch.sum(torch.abs(lowercase ) )
__UpperCamelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = len(lowercase )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
__UpperCamelCase = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(lowercase ) )
__UpperCamelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = len(lowercase )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
__UpperCamelCase = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(lowercase ) )
__UpperCamelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase )
__UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(lowercase ):
if i == len(lowercase ) - 1:
__UpperCamelCase = -1
else:
__UpperCamelCase = timesteps[i + 1]
__UpperCamelCase = scheduler.previous_timestep(lowercase )
__UpperCamelCase = prev_t.item()
self.assertEqual(lowercase , lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = [1_0_0, 8_7, 5_0, 1, 0]
__UpperCamelCase = len(lowercase )
with self.assertRaises(lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**lowercase )
__UpperCamelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowercase )
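# The previous_timestep bookkeeping checked above, in miniature: with custom
# timesteps [100, 87, 50, 1, 0], each step's "previous" timestep is simply the
# next list entry, and the final step maps to -1. Names are illustrative only.
def _sketch_previous_timesteps(timesteps=(1_0_0, 8_7, 5_0, 1, 0)):
    prev = []
    for i in range(len(timesteps)):
        prev.append(timesteps[i + 1] if i < len(timesteps) - 1 else -1)
    return prev  # [87, 50, 1, 0, -1]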
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
__UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(__A ).content
if __name__ == "__main__":
a__ : int = input('Enter Video/IGTV url: ').strip()
a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
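# For large videos, streaming the response avoids holding the whole file in
# memory. This is a sketch of an alternative download step, not the original
# behaviour; only standard requests APIs are used.
def _sketch_streamed_download(video_url, file_name):
    import requests

    with requests.get(video_url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(file_name, "wb") as fp:
            for chunk in resp.iter_content(chunk_size=1 << 16):  # 64 KiB chunks
                fp.write(chunk)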
| 349 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__A ,__A )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(__A ,__A ,bias=__A )
__UpperCamelCase = emb.weight.data
return lin_layer
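# What the embedding-to-linear helper above achieves, on a toy embedding: the
# LM head reuses (ties) the embedding matrix as its projection weight. This is
# a cleaned-up equivalent with readable names, not the original code.
def _sketch_tied_lm_head():
    import torch
    from torch import nn

    emb = nn.Embedding(num_embeddings=10, embedding_dim=4)
    vocab_size, d_model = emb.weight.shape
    lm_head = nn.Linear(d_model, vocab_size, bias=False)  # weight shape (vocab, d_model)
    lm_head.weight.data = emb.weight.data  # weights are now shared
    assert torch.equal(lm_head.weight, emb.weight)
    return lm_head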
def _lowercase ( __A ,__A="facebook/mbart-large-en-ro" ,__A=False ,__A=False ):
'''simple docstring'''
__UpperCamelCase = torch.load(__A ,map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__A )
__UpperCamelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__UpperCamelCase = MBartConfig.from_pretrained(__A ,vocab_size=__A )
if mbart_aa and finetuned:
__UpperCamelCase = """relu"""
__UpperCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__UpperCamelCase = MBartForConditionalGeneration(__A )
model.model.load_state_dict(__A )
if finetuned:
__UpperCamelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
a__ : int = parser.parse_args()
a__ : str = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
try:
__UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__UpperCamelCase = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
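# The env-flag parser above in action; the env-variable name below is
# invented for the demo.
def _sketch_parse_flag():
    import os
    from distutils.util import strtobool

    os.environ["RUN_DEMO"] = "yes"
    raw = os.environ.get("RUN_DEMO", "no")
    return bool(strtobool(raw))  # True for yes/true/1, False for no/false/0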
a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True)
a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a__ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a__ : Tuple = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _lowercase ( __A ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires faiss""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires regex""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires JAX""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def _lowercase ( __A ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__UpperCamelCase = unittest.skip("""test is slow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__UpperCamelCase = unittest.skip("""test is local""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__UpperCamelCase = unittest.skip("""test is packaged""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__UpperCamelCase = unittest.skip("""test requires remote""" )(__A )
return test_case
def _lowercase ( *__A ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__UpperCamelCase = decorator(__A )
setattr(cls ,__A ,__A )
return cls
return decorate
class UpperCAmelCase__ ( UpperCAmelCase_):
pass
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
@contextmanager
def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ):
'''simple docstring'''
__UpperCamelCase = requests.Session().request
def timeout_request(__A ,__A ,__A ,**__A ):
# Change the url to an invalid url so that the connection hangs
__UpperCamelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
__UpperCamelCase = timeout
try:
return online_request(__A ,__A ,**__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__UpperCamelCase = url
__UpperCamelCase = e.args[0]
__UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),)
__UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(__A ,__A ,**__A ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,__A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,__A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _lowercase ( *__A ,**__A ):
'''simple docstring'''
__UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowercase ( __A ,__A ):
'''simple docstring'''
return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout,)
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
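# Hedged usage sketch for the helpers above (`offline` and
# `execute_subprocess_async` are the de-obfuscated names used in this file):
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     ...  # any `requests` call made in here raises requests.ConnectionError
#
# result = execute_subprocess_async(["python", "-c", "print('hi')"])
# assert result.returncode == 0 and result.stdout == ["hi"]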
| 349 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
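# Hedged usage sketch (requires an initialized accelerate state):
# from accelerate import Accelerator
# accelerator = Accelerator()  # sets up PartialState
# logger = get_logger(__name__, log_level="INFO")
# logger.info("printed on the main process only")
# logger.info("printed by every rank, in order", main_process_only=False, in_order=True)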
| 349 |
'''simple docstring'''
import re
def split_input(str_):
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text, upper, separator):
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    return to_simple_case(text)


def to_camel_case(text):
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
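# Hedged examples for the helpers above (wrapper names such as `to_pascal_case`
# are reconstructed from each function's behaviour, not from the original dump):
# to_pascal_case("hello world")             -> "HelloWorld"
# to_camel_case("hello world")              -> "helloWorld"
# to_snake_case("hello world", upper=True)  -> "HELLO_WORLD"
# to_kebab_case("hello world", upper=False) -> "hello-world"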
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Optional[Any] = input('Enter numbers separated by comma:\n')
a__ : List[str] = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
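# Quick sanity check (hedged; the expected output follows from the logic above):
#   pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
# Pigeonhole sort runs in O(n + k) time with k = max - min + 1 holes, so it is
# only attractive when the value range is small relative to the input length.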
| 349 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (3_2, 3_2)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
@property
    def dummy_extractor(self):
def extract(*lowercase , **lowercase ):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self
return Out()
return extract
    def test_safe_diffusion_ddim(self):
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm(self):
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_fp16(self):
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
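# Hedged sketch of the safe-latent-diffusion knobs exercised by the nightly
# tests above (parameter names are taken verbatim from the calls):
# output = sd_pipe(
#     [prompt], num_inference_steps=50, output_type="np",
#     sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025,
#     sld_momentum_scale=0.5, sld_mom_beta=0.7)
# Passing sld_guidance_scale=0 disables the safety guidance entirely.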
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
__UpperCamelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
__UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCamelCase = image_classifier(lowercase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowercase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
__UpperCamelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@require_tf
    def test_small_model_tf(self):
__UpperCamelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
__UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCamelCase = image_classifier(lowercase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(lowercase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
__UpperCamelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@slow
@require_torch
    def test_large_model_pt(self):
__UpperCamelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
__UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCamelCase = image_classifier(lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
__UpperCamelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf(self):
__UpperCamelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
__UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCamelCase = image_classifier(lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
__UpperCamelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
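# Hedged usage sketch of the pipeline exercised above (model id and labels are
# taken from the slow tests):
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
# # -> list of {"score": float, "label": str} dicts, sorted by descending score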
| 349 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
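# The classes above are import-time placeholders: diffusers registers one per
# flax-only object so that touching it without flax installed raises a clear
# error. A minimal sketch of the idea (hedged: the real `DummyObject`
# metaclass in diffusers.utils reads `_backends` and is more featureful):
class _DummyBackendMeta(type):
    def __getattr__(cls, name):
        # Triggered for any missing class-level attribute, e.g. from_pretrained.
        raise ImportError(f"{cls.__name__} requires the `flax` backend. Please install flax.")


class _FlaxPlaceholder(metaclass=_DummyBackendMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the `flax` backend. Please install flax.")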
| 349 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1_0_2_4,
    'microsoft/speecht5_tts': 1_0_2_4,
    'microsoft/speecht5_vc': 1_0_2_4,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs,) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
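# Hedged usage sketch (checkpoint ids come from the maps above; this tokenizer
# wraps a character-level SentencePiece model and appends `</s>` as EOS):
# from transformers import SpeechT5Tokenizer
# tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
# ids = tok("hello world").input_ids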
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(6_4,), layers_per_block=2, norm_num_groups=3_2, act_fn="silu", double_z=True,):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1,)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1E-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None,)
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1E-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None,)
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1E-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(6_4,), layers_per_block=2, norm_num_groups=3_2, act_fn="silu", norm_type="group",):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1,)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1E-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels,)
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1E-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type,)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1E-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3],)

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
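# Hedged sketch of how the pieces above compose into a VAE forward pass
# (encoder/decoder instances and shapes are illustrative):
# moments = encoder(x)                            # (B, 2 * latent_channels, h, w) since double_z=True
# posterior = DiagonalGaussianDistribution(moments)
# z = posterior.sample()                          # mean + std * eps (reparameterization)
# recon = decoder(z)
# loss = reconstruction_loss + kl_weight * posterior.kl().mean()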
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,)
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False,)
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index,)
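# Hedged usage sketch (mirrors the upstream RAG + Ray example this file comes
# from; the worker count is illustrative):
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
# retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
# retriever.init_retrieval()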
| 349 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would make CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
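# Hedged invocation example for the reporting hook wired up above:
#   python -m pytest tests/ --make-reports=my_run
# `--make-reports` is registered by `pytest_addoption_shared` and consumed in
# `pytest_terminal_summary` to write per-run report files.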
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
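# Illustrative usage sketch (added for exposition; not part of the original file).
# The checkpoint name comes from the pretrained maps above; the printed layout is
# the BERT-style [CLS] ... [SEP] ... [SEP] pattern built by the methods above.
if __name__ == "__main__":
    tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    encoding = tokenizer("A first sentence.", "A second sentence.")
    print(encoding["input_ids"])       # [CLS] + ids_0 + [SEP] + ids_1 + [SEP]
    print(encoding["token_type_ids"])  # 0s over the first segment, 1s over the second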
| 349 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
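#
# For reference, the expansion of --variations into concrete runs is a plain
# cartesian product (this mirrors what main() below does with itertools.product):
#
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#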
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug everything but the actual run, with instant fake metrics
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
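# Illustrative note (added for exposition): with the _LazyModule above, the heavy
# submodules are only imported on first attribute access, e.g.
#
#   from transformers.models.trocr import TrOCRProcessor  # triggers the real import
#
# The package path assumes this file lives at transformers/models/trocr/__init__.py.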
| 349 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
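# Illustrative sampling-loop sketch (added for exposition; not part of the original
# scheduler file). A zero tensor stands in for a trained denoiser, and the step
# count and sample shape are arbitrary assumptions.
if __name__ == "__main__":
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # 1. temporarily increase the noise level (stochastic churn)
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        # 2. a real pipeline would call the denoiser here
        model_output = torch.zeros_like(sample_hat)
        # 3. Euler step from sigma_hat down to sigma_prev
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample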
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
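# Illustrative usage sketch (added for exposition; not part of the original file).
# Shows the dynamic ONNX axes exposed above; "multiple-choice" is one of the two
# task branches handled by the `inputs` property.
if __name__ == "__main__":
    config = AlbertConfig()
    onnx_config = AlbertOnnxConfig(config, task="multiple-choice")
    print(dict(onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, ...}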
| 349 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    # split the dataset bundle into features and target
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    # Load California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
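# Illustrative check (added for exposition; not part of the original script):
# the two sklearn metrics printed above reduce to these numpy one-liners.
if __name__ == "__main__":
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    print(np.mean(np.abs(y_true - y_pred)))  # MAE = 0.5
    print(np.mean((y_true - y_pred) ** 2))   # MSE = 0.375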
| 349 |
| 349 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    return x[0]


def get_frequency_order(message):
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message):
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
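# Illustrative demo (added for exposition; not part of the original module):
# plain English text scores high because its most and least frequent letters
# line up with ETAOIN; the exact score depends on the input text.
if __name__ == "__main__":
    sample = "The quick brown fox jumps over the lazy dog"
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))  # an integer between 0 and 12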
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
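# Illustrative note (added for exposition): the integration test above is gated by
# @slow, so it only runs when slow tests are enabled; the test-file path below is
# an assumption.
#
#   RUN_SLOW=1 pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k batch_generation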
| 349 | 1 |
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
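# Illustrative cases (added for exposition; not part of the original script):
#   is_ip_v4_address_valid("192.168.0.23")  -> True
#   is_ip_v4_address_valid("192.168.256.1") -> False (octet out of range)
#   is_ip_v4_address_valid("12.24.36")      -> False (only three octets)
# Note that this implementation caps octets at 254, so "x.y.z.255" is rejected too.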
| 349 |
'''simple docstring'''
import string
def decrypt(message):
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main():
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
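# Illustrative run (added for exposition; not part of the original script): for the
# ciphertext "KHOOR ZRUOG", the brute-force loop above prints one candidate per key,
# and the line for key #3 reads "HELLO WORLD".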
| 349 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
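# Illustrative note (added for exposition): the final ensembling step above is a
# soft vote over the folds' logits. With num_folds=3 and per-fold logits for one
# example of [2.0, 1.0], [1.5, 0.5] and [0.2, 1.0], the stacked sum is [3.7, 2.5],
# so argmax picks class 0 regardless of the .div(num_folds) scaling.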
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
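# Illustrative usage sketch (added for exposition; not part of the original file).
# Shows the extra ONNX inputs produced when use_past=True; the small config values
# and the gpt2 tokenizer checkpoint are assumptions.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
    tok = AutoTokenizer.from_pretrained("gpt2")
    dummy = onnx_config.generate_dummy_inputs(tok, batch_size=1, seq_length=5, framework=TensorType.PYTORCH)
    print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']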
| 349 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_wrong_args(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
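# Illustrative usage sketch (added for exposition; not part of the original tests):
# the decorator halves the batch size after every OOM-style failure until the
# wrapped function succeeds. The training body below is a stand-in.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        if batch_size > 16:  # pretend anything above 16 runs out of memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print(train())  # -> 16 (after trying 64 and 32)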
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
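# Usage sketch (illustrative, not part of the original module): _LazyModule
# keeps `import transformers` cheap by deferring submodule imports until first
# attribute access, e.g. the first `LayoutLMv3Model` lookup imports
# modeling_layoutlmv3 (and hence torch) on demand.
#
#   from transformers import LayoutLMv3Config   # no torch/tf import triggered yet
#   config = LayoutLMv3Config()                 # heavy deps load only when needed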
| 349 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of PIL images (channels-last) built from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
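    # Usage sketch outside the test (illustrative; requires network access to
    # the Hugging Face Hub; the model id is an example of a real BLIP checkpoint):
    #
    #   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    #   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")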
| 349 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True if some subset of the first i elements of arr sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
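# Space-optimized variant (a sketch, not part of the original algorithm file):
# the table above only ever reads row i-1, so a single 1-D row suffices as long
# as j is scanned from high to low, which guarantees each value is used at most once.
def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    row = [False] * (required_sum + 1)
    row[0] = True  # the empty subset always sums to zero
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            row[j] = row[j] or row[j - value]
    return row[required_sum]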
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)  # make sure here that pndm scheduler skips prk
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)  # make sure here that pndm scheduler skips prk
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
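    # Usage sketch outside the test harness (illustrative; the output file name
    # is hypothetical). The sld_* knobs exercised above belong to diffusers'
    # StableDiffusionPipelineSafe; sld_guidance_scale=0 disables safety guidance
    # entirely, while larger values steer generations away from unsafe concepts.
    #
    #   from diffusers import StableDiffusionPipelineSafe
    #   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
    #   image = pipe("portrait photo", sld_guidance_scale=2000, sld_warmup_steps=7).images[0]
    #   image.save("portrait.png")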
| 349 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path: str):
        # Skip tar members that would escape output_path (path traversal / CVE-2007-4559)
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
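# Quick illustration (hypothetical file names, not part of the original module):
# MagicNumberBaseExtractor.is_extractable reads the first bytes of a file and
# compares them against the subclass's `magic_numbers`, so detection works even
# when the file extension is missing or wrong.
#
#   GzipExtractor.is_extractable("data.json.gz")   # True for real gzip payloads
#   GzipExtractor.is_extractable("data.json")      # False (no \x1F\x8B header)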
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
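# End-to-end sketch (hypothetical paths; this mirrors how the datasets download
# manager drives these classes):
#
#   fmt = Extractor.infer_extractor_format("corpus.json.gz")        # -> "gzip"
#   if fmt:
#       Extractor.extract("corpus.json.gz", "/tmp/extracted/corpus", fmt)
#
#   # or via the high-level manager, which caches output under a hash of the input path:
#   output_path = ExtractManager(cache_dir="/tmp/cache").extract("corpus.json.gz")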
| 349 | 1 |