code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
a = True
except ImportError:
a = False
a = logging.get_logger(__name__) # pylint: disable=invalid-name
def a_ ( __UpperCAmelCase ) -> int:
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class a_ ( snake_case ):
@staticmethod
def UpperCamelCase ( a_ : ArgumentParser ) -> Any:
snake_case: Tuple =parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=a_ , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=a_ , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=a_ )
def __init__( self : Any , a_ : bool , a_ : str , a_ : int=None , *a_ : List[str] ) -> Dict:
snake_case: Dict =testing
snake_case: Union[str, Any] =testing_file
snake_case: Any =path
def UpperCamelCase ( self : str ) -> Optional[int]:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case: int =[directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]]
if len(a_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
snake_case: Tuple =(
Path(a_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case: Optional[int] =path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(a_ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
snake_case: Dict =json.load(a_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=a_ , extra_context=a_ , )
snake_case: int =[directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
snake_case: Dict =json.load(a_ )
snake_case: Tuple =configuration['lowercase_modelname']
snake_case: Union[str, Any] =configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'''{directory}/configuration.json''' )
snake_case: Tuple ='PyTorch' in generate_tensorflow_pytorch_and_flax
snake_case: Any ='TensorFlow' in generate_tensorflow_pytorch_and_flax
snake_case: Union[str, Any] ='Flax' in generate_tensorflow_pytorch_and_flax
snake_case: Optional[Any] =F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(a_ , exist_ok=a_ )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=a_ )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(a_ : Union[str, Any] ):
with open(a_ , 'r' ) as f:
snake_case: Union[str, Any] =f.readlines()
with open(a_ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(a_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(a_ : str , a_ : str , a_ : List[str] ):
# Create temp file
snake_case , snake_case: Union[str, Any] =mkstemp()
snake_case: str =False
with fdopen(a_ , 'w' ) as new_file:
with open(a_ ) as old_file:
for line in old_file:
new_file.write(a_ )
if line_to_copy_below in line:
snake_case: int =True
for line_to_copy in lines_to_copy:
new_file.write(a_ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(a_ , a_ )
# Remove original file
remove(a_ )
# Move new file
move(a_ , a_ )
def skip_units(a_ : List[str] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(a_ : Any ):
with open(a_ ) as datafile:
snake_case: List[str] =[]
snake_case: Optional[int] =False
snake_case: Optional[int] =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case: Optional[Any] =line.split('"' )[1]
snake_case: int =skip_units(a_ )
elif "# Below: " in line and "##" not in line:
snake_case: Optional[Any] =line.split('"' )[1]
snake_case: Any =skip_units(a_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(a_ , a_ , a_ )
snake_case: Tuple =[]
elif "# Replace with" in line and "##" not in line:
snake_case: Any =[]
elif "##" not in line:
lines_to_copy.append(a_ )
remove(a_ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(a_ )
| 350 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
"""simple docstring"""
snake_case: Any ={
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
snake_case: Dict =int(re.match(R'.*layer_(\d*).*' , __UpperCAmelCase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def a_ ( __UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
snake_case: Optional[Any] =re.search(R'[^\d](\d+)$' , str(__UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
snake_case: str =int(bit_search.groups()[0] )
return bit_size // 8
def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
"""simple docstring"""
if bloom_config_file == "":
snake_case: Tuple =BloomConfig()
else:
snake_case: List[Any] =BloomConfig.from_json_file(__UpperCAmelCase )
if shard_model:
snake_case: Any =os.listdir(__UpperCAmelCase )
snake_case: Any =sorted(filter(lambda __UpperCAmelCase : s.startswith('layer' ) and "model_00" in s , __UpperCAmelCase ) )
snake_case: int ={'weight_map': {}, 'metadata': {}}
snake_case: Dict =0
snake_case: List[str] =None
snake_case: Any =BloomConfig()
for j, file in enumerate(__UpperCAmelCase ):
print('Processing file: {}'.format(__UpperCAmelCase ) )
snake_case: Any =None
for i in range(__UpperCAmelCase ):
# load all TP files
snake_case: Optional[int] =file.replace('model_00' , f'''model_0{i}''' )
snake_case: List[str] =torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case: int =list(temp.keys() )
for key in keys:
snake_case: List[Any] =temp.pop(__UpperCAmelCase )
if tensors is None:
snake_case: Tuple =temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case: Optional[int] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case: str =torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case: Dict =tensors[key] / pretraining_tp
torch.save(
__UpperCAmelCase , os.path.join(
__UpperCAmelCase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case: List[str] =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case: List[Any] ='pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) )
snake_case: Tuple =BloomConfig()
snake_case: Union[str, Any] =pytorch_dump_folder_path + '/' + CONFIG_NAME
snake_case: Any =total_size
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCAmelCase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
snake_case: Tuple =json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + '\n'
f.write(__UpperCAmelCase )
else:
snake_case: Optional[Any] =BloomModel(__UpperCAmelCase )
snake_case: Any =os.listdir(__UpperCAmelCase )
snake_case: Optional[int] =sorted(filter(lambda __UpperCAmelCase : s.startswith('layer' ) and "model_00" in s , __UpperCAmelCase ) )
snake_case: str =None
for i, file in enumerate(__UpperCAmelCase ):
snake_case: List[str] =None
for i in range(__UpperCAmelCase ):
# load all TP files
snake_case: int =file.replace('model_00' , f'''model_0{i}''' )
snake_case: Optional[Any] =torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case: List[str] =list(temp.keys() )
for key in keys:
snake_case: int =temp.pop(__UpperCAmelCase )
if tensors is None:
snake_case: Any =temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case: List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case: Tuple =torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case: int =tensors[key] / pretraining_tp
snake_case: Optional[Any] =model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
snake_case: Optional[Any] =set(other_keys.missing_keys )
else:
snake_case: Any =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
snake_case: Optional[int] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case: Any =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
snake_case: Optional[Any] =model.to(config.torch_dtype )
torch.save(model.state_dict() , __UpperCAmelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 350 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
def __init__( self : int , _A : List[str] , _A : Tuple=13 , _A : Tuple=7 , _A : List[str]=True , _A : List[str]=True , _A : Tuple=True , _A : Any=True , _A : List[str]=True , _A : Dict=False , _A : str=False , _A : List[Any]=False , _A : int=2 , _A : Union[str, Any]=99 , _A : Dict=0 , _A : int=32 , _A : Optional[Any]=5 , _A : Dict=4 , _A : str=0.1 , _A : str=0.1 , _A : Any=512 , _A : Optional[Any]=2 , _A : Union[str, Any]=0.0_2 , _A : Optional[Any]=2 , _A : Optional[Any]=4 , _A : Optional[Any]="last" , _A : Optional[Any]=True , _A : Dict=None , _A : Optional[int]=0 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Tuple = use_input_lengths
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : Tuple = gelu_activation
UpperCAmelCase__ : Union[str, Any] = sinusoidal_embeddings
UpperCAmelCase__ : Optional[Any] = causal
UpperCAmelCase__ : Union[str, Any] = asm
UpperCAmelCase__ : Dict = n_langs
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Dict = n_special
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Union[str, Any] = num_choices
UpperCAmelCase__ : Dict = summary_type
UpperCAmelCase__ : Dict = use_proj
UpperCAmelCase__ : int = scope
UpperCAmelCase__ : str = bos_token_id
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_lengths:
UpperCAmelCase__ : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : List[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = XLMModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(_A , lengths=_A , langs=_A )
UpperCAmelCase__ : Optional[Any] = model(_A , langs=_A )
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Tuple , _A : Dict , _A : Tuple , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : Dict , _A : List[str] , _A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Tuple , _A : Dict , _A : Any , _A : str , _A : int , _A : Tuple , _A : List[str] , _A : List[str] , _A : Dict , _A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : int = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A )
UpperCAmelCase__ : Any = model(_A , start_positions=_A , end_positions=_A )
UpperCAmelCase__ : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Optional[int] , _A : List[str] , _A : int , _A : int , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : str , _A : List[str] , _A : Tuple , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[str] = model(_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
UpperCAmelCase__ : int = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
(UpperCAmelCase__ ) : Optional[Any] = result_with_labels.to_tuple()
UpperCAmelCase__ : Dict = model(_A , start_positions=_A , end_positions=_A )
(UpperCAmelCase__ ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase_ ( self : int , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Optional[Any] , _A : int , _A : List[str] , _A : str , _A : Tuple , _A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[str] = model(_A )
UpperCAmelCase__ : Optional[Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self : Dict , _A : List[str] , _A : Tuple , _A : int , _A : Optional[Any] , _A : int , _A : List[str] , _A : int , _A : int , _A : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : int = self.num_labels
UpperCAmelCase__ : Any = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[str] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : List[Any] , _A : str , _A : Tuple , _A : Dict , _A : Any , _A : List[Any] , _A : Any , _A : Any , _A : List[Any] , _A : Tuple , ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_choices
UpperCAmelCase__ : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
(
UpperCAmelCase__
) : Any = config_and_inputs
UpperCAmelCase__ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCAmelCase__ = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self : List[str] , _A : Any , _A : Optional[int] , _A : List[str] , _A : Optional[int] , _A : List[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase_ ( self : str , _A : Any , _A : str , _A : Union[str, Any]=False ):
'''simple docstring'''
UpperCAmelCase__ : Dict = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
UpperCAmelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_A , emb_dim=37 )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def lowercase_ ( self : Tuple , _A : Any , _A : Optional[int] , _A : Any , _A : int , _A : Dict , _A : List[Any]=False , _A : List[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
UpperCAmelCase__ : str = min_length + idx + 1
UpperCAmelCase__ : str = min_length + idx + 1
UpperCAmelCase__ : List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def lowercase_ ( self : Dict , _A : Union[str, Any] , _A : str , _A : str , _A : Optional[Any] , _A : int , _A : Tuple=False , _A : Union[str, Any]=1 ):
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
UpperCAmelCase__ : List[str] = min_length + idx + 1
UpperCAmelCase__ : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
UpperCAmelCase__ : Tuple = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase__ : List[Any] = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 720 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
UpperCAmelCase__ : Tuple = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312 | 0 |
"""simple docstring"""
def a ( __UpperCAmelCase : str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a ( __UpperCAmelCase : bytes , __UpperCAmelCase : int ) -> np.array:
__magic_name__: Optional[int] = f'{sampling_rate}'
__magic_name__: Union[str, Any] = """1"""
__magic_name__: Tuple = """f32le"""
__magic_name__: Optional[Any] = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(__UpperCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__magic_name__: Any = ffmpeg_process.communicate(__UpperCAmelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
__magic_name__: Optional[Any] = output_stream[0]
__magic_name__: Optional[int] = np.frombuffer(__UpperCAmelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def a ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : str = "f32le" , ) -> Optional[int]:
__magic_name__: List[Any] = f'{sampling_rate}'
__magic_name__: Optional[int] = """1"""
if format_for_conversion == "s16le":
__magic_name__: Dict = 2
elif format_for_conversion == "f32le":
__magic_name__: List[str] = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__magic_name__: str = platform.system()
if system == "Linux":
__magic_name__: Optional[Any] = """alsa"""
__magic_name__: Optional[Any] = """default"""
elif system == "Darwin":
__magic_name__: Union[str, Any] = """avfoundation"""
__magic_name__: Dict = """:0"""
elif system == "Windows":
__magic_name__: Any = """dshow"""
__magic_name__: Union[str, Any] = """default"""
__magic_name__: Union[str, Any] = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
__magic_name__: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__magic_name__: Union[str, Any] = _ffmpeg_stream(__UpperCAmelCase , __UpperCAmelCase )
for item in iterator:
yield item
def a ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Union[Tuple[float, float], float]] = None , __UpperCAmelCase : str = "f32le" , ) -> List[Any]:
if stream_chunk_s is not None:
__magic_name__: Optional[int] = stream_chunk_s
else:
__magic_name__: List[str] = chunk_length_s
__magic_name__: Optional[int] = ffmpeg_microphone(__UpperCAmelCase , __UpperCAmelCase , format_for_conversion=__UpperCAmelCase )
if format_for_conversion == "s16le":
__magic_name__: str = np.intaa
__magic_name__: str = 2
elif format_for_conversion == "f32le":
__magic_name__: Optional[Any] = np.floataa
__magic_name__: Dict = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
__magic_name__: List[Any] = chunk_length_s / 6
__magic_name__: Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCAmelCase , (int, float) ):
__magic_name__: Union[str, Any] = [stride_length_s, stride_length_s]
__magic_name__: List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__magic_name__: List[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__magic_name__: List[str] = datetime.datetime.now()
__magic_name__: Optional[Any] = datetime.timedelta(seconds=__UpperCAmelCase )
for item in chunk_bytes_iter(__UpperCAmelCase , __UpperCAmelCase , stride=(stride_left, stride_right) , stream=__UpperCAmelCase ):
# Put everything back in numpy scale
__magic_name__: int = np.frombuffer(item["""raw"""] , dtype=__UpperCAmelCase )
__magic_name__: int = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
__magic_name__: Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple[int, int] , __UpperCAmelCase : bool = False ) -> Union[str, Any]:
__magic_name__: Tuple = B""""""
__magic_name__, __magic_name__: int = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
__magic_name__: int = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCAmelCase ) < chunk_len:
__magic_name__: Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCAmelCase ) >= chunk_len:
# We are flushing the accumulator
__magic_name__: Union[str, Any] = (_stride_left, stride_right)
__magic_name__: Any = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
__magic_name__: str = False
yield item
__magic_name__: str = stride_left
__magic_name__: Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCAmelCase ) > stride_left:
__magic_name__: List[Any] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
__magic_name__: Union[str, Any] = False
yield item
def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int ) -> List[Any]:
__magic_name__: str = 2**2_4 # 16Mo
try:
with subprocess.Popen(__UpperCAmelCase , stdout=subprocess.PIPE , bufsize=__UpperCAmelCase ) as ffmpeg_process:
while True:
__magic_name__: Dict = ffmpeg_process.stdout.read(__UpperCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 96 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_a : Dict = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
_a : List[str] = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def a_ ( __magic_name__ ) -> List[str]:
"""simple docstring"""
snake_case : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
snake_case : Tuple = state_dict.pop(__magic_name__ )
# emb -> embedding
if name.startswith('''emb.''' ):
snake_case : int = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
snake_case : List[str] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
snake_case : Union[str, Any] = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , __magic_name__ )
# ffn -> feed_forward
snake_case : Dict = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , __magic_name__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
snake_case : Optional[int] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
snake_case : List[str] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
snake_case : List[Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
snake_case : Tuple = '''rwkv.''' + name
snake_case : List[str] = weight
return state_dict
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=None ) -> Tuple:
"""simple docstring"""
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
snake_case : str = 50_277
snake_case : str = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
snake_case : Tuple = PreTrainedTokenizerFast(tokenizer_file=__magic_name__ )
snake_case : str = len(__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
# 2. Build the config
snake_case : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
snake_case : Union[str, Any] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
snake_case : Union[str, Any] = RwkvConfig(
vocab_size=__magic_name__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__magic_name__ )
# 3. Download model file then convert state_dict
snake_case : Union[str, Any] = hf_hub_download(__magic_name__ , __magic_name__ )
snake_case : List[Any] = torch.load(__magic_name__ , map_location='''cpu''' )
snake_case : Tuple = convert_state_dict(__magic_name__ )
# 4. Split in shards and save
snake_case , snake_case : Optional[Any] = shard_checkpoint(__magic_name__ )
for shard_file, shard in shards.items():
torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
if index is not None:
snake_case : Dict = os.path.join(__magic_name__ , __magic_name__ )
# Save the index as well
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f:
snake_case : Optional[int] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + '''\n'''
f.write(__magic_name__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
snake_case : Tuple = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
snake_case : Union[str, Any] = torch.load(os.path.join(__magic_name__ , __magic_name__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__magic_name__ , __magic_name__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(__magic_name__ )
model.push_to_hub(__magic_name__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
_a : int = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 84 |
import string
import numpy
def a_ ( __magic_name__ , __magic_name__ ) -> int:
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , __magic_name__ )
class a_ :
A__ : List[Any] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
A__ : List[str] = numpy.vectorize(lambda a : x % 36 )
A__ : Dict = numpy.vectorize(a )
def __init__( self : List[str] , UpperCAmelCase__ : numpy.ndarray ):
"""simple docstring"""
snake_case : int = self.modulus(UpperCAmelCase__ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
snake_case : List[str] = encrypt_key.shape[0]
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : str ):
"""simple docstring"""
return self.key_string.index(UpperCAmelCase__ )
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : int ):
"""simple docstring"""
return self.key_string[round(UpperCAmelCase__ )]
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : Tuple = det % len(self.key_string )
snake_case : Tuple = len(self.key_string )
if greatest_common_divisor(UpperCAmelCase__ , len(self.key_string ) ) != 1:
snake_case : List[Any] = (
F"determinant modular {req_l} of encryption key({det}) "
F"is not co prime w.r.t {req_l}.\nTry another key."
)
raise ValueError(UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : Optional[int] = [char for char in text.upper() if char in self.key_string]
snake_case : Optional[int] = chars[-1]
while len(UpperCAmelCase__ ) % self.break_key != 0:
chars.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : Optional[int] = self.process_text(text.upper() )
snake_case : Optional[int] = ''''''
for i in range(0 , len(UpperCAmelCase__ ) - self.break_key + 1 , self.break_key ):
snake_case : int = text[i : i + self.break_key]
snake_case : int = [self.replace_letters(UpperCAmelCase__ ) for char in batch]
snake_case : Tuple = numpy.array([vec] ).T
snake_case : Optional[Any] = self.modulus(self.encrypt_key.dot(UpperCAmelCase__ ) ).T.tolist()[
0
]
snake_case : Dict = ''''''.join(
self.replace_digits(UpperCAmelCase__ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : int = det % len(self.key_string )
snake_case : Dict = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
snake_case : Any = i
break
snake_case : Any = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCAmelCase__ ) )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : Any = self.make_decrypt_key()
snake_case : Optional[Any] = self.process_text(text.upper() )
snake_case : int = ''''''
for i in range(0 , len(UpperCAmelCase__ ) - self.break_key + 1 , self.break_key ):
snake_case : Any = text[i : i + self.break_key]
snake_case : int = [self.replace_letters(UpperCAmelCase__ ) for char in batch]
snake_case : List[str] = numpy.array([vec] ).T
snake_case : Optional[Any] = self.modulus(decrypt_key.dot(UpperCAmelCase__ ) ).T.tolist()[0]
snake_case : int = ''''''.join(
self.replace_digits(UpperCAmelCase__ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def a_ ( ) -> None:
"""simple docstring"""
snake_case : Any = int(input('''Enter the order of the encryption key: ''' ) )
snake_case : List[Any] = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(__magic_name__ ):
snake_case : Optional[Any] = [int(__magic_name__ ) for x in input().split()]
hill_matrix.append(__magic_name__ )
snake_case : List[str] = HillCipher(numpy.array(__magic_name__ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
snake_case : int = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
snake_case : List[Any] = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(__magic_name__ ) )
elif option == "2":
snake_case : int = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 84 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ : List[Any] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCAmelCase__ : int = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCAmelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCAmelCase__ : Any = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 223 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_UpperCamelCase : Union[str, Any] = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
_UpperCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> List[str]:
UpperCAmelCase_ : int = '''https://pypi.org/pypi/diffusers/json'''
UpperCAmelCase_ : List[str] = json.loads(request.urlopen(A ).read() )['''releases'''].keys()
return sorted(A , key=lambda A : version.Version(A ) )
def __UpperCAmelCase ( ) -> List[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(A )
os.makedirs(A , exist_ok=A )
UpperCAmelCase_ : Tuple = Path(A ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( A : Union[str, os.PathLike] ) -> str:
init_hf_modules()
UpperCAmelCase_ : Any = Path(A ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(A , exist_ok=A )
UpperCAmelCase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( A : Dict ) -> Optional[int]:
with open(A , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : List[Any] = f.read()
# Imports of the form `import .xxx`
UpperCAmelCase_ : Any = re.findall('''^\s*import\s+\.(\S+)\s*$''' , A , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , A , flags=re.MULTILINE )
# Unique-ify
return list(set(A ) )
def __UpperCAmelCase ( A : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[str] = [module_file]
UpperCAmelCase_ : str = []
# Let's recurse through all relative imports
while not no_change:
UpperCAmelCase_ : Any = []
for f in files_to_check:
new_imports.extend(get_relative_imports(A ) )
UpperCAmelCase_ : Tuple = Path(A ).parent
UpperCAmelCase_ : Union[str, Any] = [str(module_path / m ) for m in new_imports]
UpperCAmelCase_ : str = [f for f in new_import_files if f not in all_relative_imports]
UpperCAmelCase_ : str = [F"{f}.py" for f in new_import_files]
UpperCAmelCase_ : Optional[int] = len(A ) == 0
all_relative_imports.extend(A )
return all_relative_imports
def __UpperCAmelCase ( A : List[str] ) -> Union[str, Any]:
with open(A , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
# Imports of the form `import xxx`
UpperCAmelCase_ : Optional[Any] = re.findall('''^\s*import\s+(\S+)\s*$''' , A , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , A , flags=re.MULTILINE )
# Only keep the top-level module
UpperCAmelCase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
UpperCAmelCase_ : str = list(set(A ) )
UpperCAmelCase_ : int = []
for imp in imports:
try:
importlib.import_module(A )
except ImportError:
missing_packages.append(A )
if len(A ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"{', '.join(A )}. Run `pip install {' '.join(A )}`" )
return get_relative_imports(A )
def __UpperCAmelCase ( A : Optional[Any] , A : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : int = module_path.replace(os.path.sep , '''.''' )
UpperCAmelCase_ : int = importlib.import_module(A )
if class_name is None:
return find_pipeline_class(A )
return getattr(A , A )
def __UpperCAmelCase ( A : str ) -> List[str]:
from ..pipelines import DiffusionPipeline
UpperCAmelCase_ : List[Any] = dict(inspect.getmembers(A , inspect.isclass ) )
UpperCAmelCase_ : Optional[int] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , A )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
UpperCAmelCase_ : int = cls
return pipeline_class
def __UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , ) -> str:
UpperCAmelCase_ : Any = str(A )
UpperCAmelCase_ : List[Any] = os.path.join(A , A )
if os.path.isfile(A ):
UpperCAmelCase_ : Optional[Any] = module_file_or_url
UpperCAmelCase_ : List[str] = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
UpperCAmelCase_ : List[str] = get_diffusers_versions()
# cut ".dev0"
UpperCAmelCase_ : Dict = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
UpperCAmelCase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
UpperCAmelCase_ : List[str] = F"v{revision}"
elif revision == "main":
UpperCAmelCase_ : Dict = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
UpperCAmelCase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=A , pipeline=A )
try:
UpperCAmelCase_ : List[Any] = cached_download(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
UpperCAmelCase_ : List[str] = '''git'''
UpperCAmelCase_ : Union[str, Any] = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
UpperCAmelCase_ : List[str] = hf_hub_download(
A , A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
UpperCAmelCase_ : Tuple = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
UpperCAmelCase_ : Any = check_imports(A )
# Now we move the module inside our cached dynamic modules.
UpperCAmelCase_ : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(A )
UpperCAmelCase_ : Optional[Any] = Path(A ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(A , submodule_path / module_file )
for module_needed in modules_needed:
UpperCAmelCase_ : int = F"{module_needed}.py"
shutil.copy(os.path.join(A , A ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = use_auth_token
elif use_auth_token is True:
UpperCAmelCase_ : Tuple = HfFolder.get_token()
else:
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[Any] = model_info(A , revision=A , token=A ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCAmelCase_ : Optional[Any] = submodule_path / commit_hash
UpperCAmelCase_ : str = full_submodule + os.path.sep + commit_hash
create_dynamic_module(A )
if not (submodule_path / module_file).exists():
shutil.copy(A , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
A , F"{module_needed}.py" , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return os.path.join(A , A )
def __UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[str] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : Optional[int] , ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = get_cached_module_file(
A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return get_class_in_module(A , final_module.replace('''.py''' , '''''' ) )
| 541 | 0 |
'''simple docstring'''
import argparse
import copy
def lowercase_ ( lowercase__ ) ->str:
_snake_case: Union[str, Any] = {}
with open(lowercase__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_snake_case: int = []
_list.append([line.split()[1], line.split()[2]] )
_snake_case: Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_snake_case: int = []
_list.append([line.split()[0], line.split()[2]] )
_snake_case: List[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase_ ( lowercase__ , lowercase__ ) ->str:
with open(lowercase__ ) as f:
_snake_case: str = f.read(1 )
_snake_case: Any = start_node
_snake_case: Dict = []
_snake_case: Optional[int] = start_node
_snake_case: str = 0
while visiting not in first_solution:
_snake_case: Tuple = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowercase__ ) and k[0] not in first_solution:
_snake_case: str = k[1]
_snake_case: int = k[0]
first_solution.append(lowercase__ )
_snake_case: Union[str, Any] = distance_of_first_solution + int(lowercase__ )
_snake_case: int = best_node
first_solution.append(lowercase__ )
_snake_case: Tuple = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_snake_case: str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowercase_ ( lowercase__ , lowercase__ ) ->Any:
_snake_case: Optional[Any] = []
for n in solution[1:-1]:
_snake_case: Union[str, Any] = solution.index(lowercase__ )
for kn in solution[1:-1]:
_snake_case: Union[str, Any] = solution.index(lowercase__ )
if n == kn:
continue
_snake_case: Any = copy.deepcopy(lowercase__ )
_snake_case: Tuple = kn
_snake_case: Optional[int] = n
_snake_case: str = 0
for k in _tmp[:-1]:
_snake_case: str = _tmp[_tmp.index(lowercase__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_snake_case: Optional[Any] = distance + int(i[1] )
_tmp.append(lowercase__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_snake_case: Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowercase__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) ->int:
_snake_case: Optional[Any] = 1
_snake_case: Optional[Any] = first_solution
_snake_case: List[Any] = []
_snake_case: Optional[Any] = distance_of_first_solution
_snake_case: Optional[Any] = solution
while count <= iters:
_snake_case: Tuple = find_neighborhood(lowercase__ , lowercase__ )
_snake_case: int = 0
_snake_case: Optional[Any] = neighborhood[index_of_best_solution]
_snake_case: Union[str, Any] = len(lowercase__ ) - 1
_snake_case: Union[str, Any] = False
while not found:
_snake_case: str = 0
while i < len(lowercase__ ):
if best_solution[i] != solution[i]:
_snake_case: Tuple = best_solution[i]
_snake_case: Optional[int] = solution[i]
break
_snake_case: Dict = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_snake_case: Optional[Any] = True
_snake_case: Tuple = best_solution[:-1]
_snake_case: int = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_snake_case: str = cost
_snake_case: Any = solution
else:
_snake_case: List[str] = index_of_best_solution + 1
_snake_case: Union[str, Any] = neighborhood[index_of_best_solution]
if len(lowercase__ ) >= size:
tabu_list.pop(0 )
_snake_case: Tuple = count + 1
return best_solution_ever, best_cost
def lowercase_ ( lowercase__=None ) ->List[str]:
_snake_case: Optional[int] = generate_neighbours(args.File )
_snake_case , _snake_case: Union[str, Any] = generate_first_solution(
args.File , lowercase__ )
_snake_case , _snake_case: Dict = tabu_search(
lowercase__ , lowercase__ , lowercase__ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 273 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
def __init__( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : str=13 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=2 , __snake_case : Dict=3 , __snake_case : Optional[Any]=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=32 , __snake_case : Optional[int]=5 , __snake_case : Any=4 , __snake_case : int=37 , __snake_case : int="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : int=10 , __snake_case : Any=0.02 , __snake_case : List[str]=None , __snake_case : Tuple=2 , ):
'''simple docstring'''
_snake_case: Optional[Any] = parent
_snake_case: Tuple = batch_size
_snake_case: str = image_size
_snake_case: int = patch_size
_snake_case: Union[str, Any] = num_channels
_snake_case: Dict = is_training
_snake_case: Optional[Any] = use_labels
_snake_case: Optional[Any] = hidden_size
_snake_case: Tuple = num_hidden_layers
_snake_case: List[Any] = num_attention_heads
_snake_case: Union[str, Any] = intermediate_size
_snake_case: List[str] = hidden_act
_snake_case: Tuple = hidden_dropout_prob
_snake_case: List[Any] = attention_probs_dropout_prob
_snake_case: str = type_sequence_label_size
_snake_case: Any = initializer_range
_snake_case: str = scope
_snake_case: Union[str, Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case: Tuple = (image_size // patch_size) ** 2
_snake_case: List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case: List[str] = None
if self.use_labels:
_snake_case: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : List[str] ):
'''simple docstring'''
_snake_case: Dict = ViTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Tuple = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : int ):
'''simple docstring'''
_snake_case: int = ViTForMaskedImageModeling(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Dict = model(__snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case: List[str] = 1
_snake_case: Tuple = ViTForMaskedImageModeling(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case: Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ):
'''simple docstring'''
_snake_case: Optional[int] = self.type_sequence_label_size
_snake_case: Union[str, Any] = ViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case: Tuple = 1
_snake_case: Optional[int] = ViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case: Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Any = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
): int = config_and_inputs
_snake_case: Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Optional[int] = ViTModelTester(self )
_snake_case: Union[str, Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case , _snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case: Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case: Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case: int = model_class(__snake_case )
_snake_case: List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case: List[Any] = [*signature.parameters.keys()]
_snake_case: str = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: Any = ViTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowercase_ ( ) ->List[Any]:
_snake_case: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Optional[int] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__snake_case )
_snake_case: Dict = self.default_image_processor
_snake_case: Optional[Any] = prepare_img()
_snake_case: List[str] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
_snake_case: Optional[int] = model(**__snake_case )
# verify the logits
_snake_case: Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __snake_case )
_snake_case: Dict = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__snake_case )
_snake_case: Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_80 )
_snake_case: Optional[int] = prepare_img()
_snake_case: Dict = image_processor(images=__snake_case , return_tensors='pt' )
_snake_case: Optional[Any] = inputs.pixel_values.to(__snake_case )
# forward pass
with torch.no_grad():
_snake_case: str = model(__snake_case , interpolate_pos_encoding=__snake_case )
# verify the logits
_snake_case: List[str] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , __snake_case )
_snake_case: Any = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: List[Any] = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
_snake_case: Dict = self.default_image_processor
_snake_case: Any = prepare_img()
_snake_case: str = image_processor(images=__snake_case , return_tensors='pt' )
_snake_case: Any = inputs.pixel_values.to(__snake_case )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_snake_case: int = model(__snake_case )
| 273 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCamelCase__ ( nn.Module):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase_ : int = 1_6 , UpperCamelCase_ : int = 8_8 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 3_2 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str = "geglu" , UpperCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__magic_name__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCamelCase_ , attention_head_dim=UpperCamelCase_ , in_channels=UpperCamelCase_ , num_layers=UpperCamelCase_ , dropout=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , cross_attention_dim=UpperCamelCase_ , attention_bias=UpperCamelCase_ , sample_size=UpperCamelCase_ , num_vector_embeds=UpperCamelCase_ , activation_fn=UpperCamelCase_ , num_embeds_ada_norm=UpperCamelCase_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__magic_name__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__magic_name__ = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__magic_name__ = [1, 0]
def a__ ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : bool = True , ):
'''simple docstring'''
__magic_name__ = hidden_states
__magic_name__ = []
__magic_name__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__magic_name__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__magic_name__ = self.transformer_index_for_condition[i]
__magic_name__ = self.transformers[transformer_index](
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ , cross_attention_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__magic_name__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__magic_name__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCamelCase_ ) | 545 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
snake_case : Any = logging.getLogger(__name__)
snake_case : Optional[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
snake_case : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = field(
default=a_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a_)} , )
__UpperCAmelCase = field(
default=a_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCAmelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCAmelCase = field(
default=a_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def a__ ( self : Optional[int] ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""})
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
__UpperCAmelCase = field(default=a_ , metadata={"""help""": """The input training data file (a text file)."""})
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
__UpperCAmelCase = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
__UpperCAmelCase = field(
default=a_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
__UpperCAmelCase = field(
default=a_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCAmelCase = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
__UpperCAmelCase = field(
default=a_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def a__ ( self : List[str] ):
'''simple docstring'''
if self.train_file is not None:
__magic_name__ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__magic_name__ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( __snake_case: Tuple , __snake_case: str ) -> Tuple:
"""simple docstring"""
with open(__snake_case , 'r' , encoding='utf-8' ) as f:
__magic_name__ = [json.loads(__snake_case ) for line in f.read().splitlines() if (len(__snake_case ) > 0 and not line.isspace())]
assert len(__snake_case ) == len(__snake_case )
__magic_name__ = {c: dataset[c] for c in dataset.column_names}
__magic_name__ = refs
return Dataset.from_dict(__snake_case )
def A ( ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__magic_name__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
__magic_name__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
__magic_name__ = {}
if data_args.train_file is not None:
__magic_name__ = data_args.train_file
if data_args.validation_file is not None:
__magic_name__ = data_args.validation_file
__magic_name__ = data_args.train_file.split('.' )[-1]
if extension == "txt":
__magic_name__ = 'text'
__magic_name__ = load_dataset(__snake_case , data_files=__snake_case )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__magic_name__ = AutoConfig.from_pretrained(model_args.config_name , **__snake_case )
elif model_args.model_name_or_path:
__magic_name__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
__magic_name__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
__magic_name__ = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__snake_case )
elif model_args.model_name_or_path:
__magic_name__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__magic_name__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__magic_name__ = AutoModelForMaskedLM.from_config(__snake_case )
model.resize_token_embeddings(len(__snake_case ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__magic_name__ = datasets['train'].column_names
else:
__magic_name__ = datasets['validation'].column_names
__magic_name__ = 'text' if 'text' in column_names else column_names[0]
__magic_name__ = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(__snake_case: str ):
# Remove empty lines
__magic_name__ = [line for line in examples['text'] if len(__snake_case ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=__snake_case , truncation=__snake_case , max_length=data_args.max_seq_length )
__magic_name__ = datasets.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__magic_name__ = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__magic_name__ = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__magic_name__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__magic_name__ = False
# Data collator
# This one will take care of randomly masking the tokens.
__magic_name__ = DataCollatorForWholeWordMask(tokenizer=__snake_case , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__magic_name__ = Trainer(
model=__snake_case , args=__snake_case , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__magic_name__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__magic_name__ = model_args.model_name_or_path
else:
__magic_name__ = None
__magic_name__ = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
__magic_name__ = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
__magic_name__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__magic_name__ = trainer.evaluate()
__magic_name__ = math.exp(eval_output['eval_loss'] )
__magic_name__ = perplexity
__magic_name__ = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def A ( __snake_case: List[str] ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 545 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__snake_case : int =['small', 'medium', 'large']
__snake_case : List[Any] ='lm_head.decoder.weight'
__snake_case : str ='lm_head.weight'
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : str):
'''simple docstring'''
lowerCAmelCase__ : int = torch.load(lowerCamelCase_)
lowerCAmelCase__ : List[Any] = d.pop(lowerCamelCase_)
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_)
torch.save(lowerCamelCase_ ,os.path.join(lowerCamelCase_ ,lowerCamelCase_))
if __name__ == "__main__":
__snake_case : Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
__snake_case : str =parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__snake_case : int =os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__snake_case : Optional[Any] =f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 90 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =AutoencoderKL
snake_case_ ="""sample"""
snake_case_ =1e-2
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : List[Any] = (32, 32)
lowerCAmelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
return {"sample": image}
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : int = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCAmelCase__ : str = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' ,'''Gradient checkpointing skipped on MPS''' )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : Tuple = self.model_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
lowerCAmelCase__ : Tuple = model(**__lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowerCAmelCase__ : Optional[Any] = torch.randn_like(__lowerCamelCase )
lowerCAmelCase__ : Tuple = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCAmelCase__ : str = self.model_class(**__lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCAmelCase__ : Dict = model_a(**__lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowerCAmelCase__ : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowerCAmelCase__ : List[str] = dict(model.named_parameters() )
lowerCAmelCase__ : Optional[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowerCAmelCase__ : Tuple = model.to(__lowerCamelCase )
model.eval()
if torch_device == "mps":
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
else:
lowerCAmelCase__ : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Dict = image.to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ,sample_posterior=__lowerCamelCase ,generator=__lowerCamelCase ).sample
lowerCAmelCase__ : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCAmelCase__ : List[str] = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
lowerCAmelCase__ : str = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
lowerCAmelCase__ : Any = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
@slow
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase ) for s in shape] )}.npy"""
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ,__lowerCamelCase=0 ,__lowerCamelCase=(4, 3, 5_12, 5_12) ,__lowerCamelCase=False ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = torch.floataa if fpaa else torch.floataa
lowerCAmelCase__ : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCamelCase ,__lowerCamelCase ) ) ).to(__lowerCamelCase ).to(__lowerCamelCase )
return image
def lowerCAmelCase__ (self ,__lowerCamelCase="CompVis/stable-diffusion-v1-4" ,__lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Dict = '''fp16''' if fpaa else None
lowerCAmelCase__ : Optional[Any] = torch.floataa if fpaa else torch.floataa
lowerCAmelCase__ : List[Any] = AutoencoderKL.from_pretrained(
__lowerCamelCase ,subfolder='''vae''' ,torch_dtype=__lowerCamelCase ,revision=__lowerCamelCase ,)
model.to(__lowerCamelCase ).eval()
return model
def lowerCAmelCase__ (self ,__lowerCamelCase=0 ) -> str:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(__lowerCamelCase )
return torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_sd_vae_model()
lowerCAmelCase__ : Any = self.get_sd_image(__lowerCamelCase )
lowerCAmelCase__ : List[str] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Dict = model(__lowerCamelCase ,generator=__lowerCamelCase ,sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ : Any = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : List[str] = self.get_sd_image(__lowerCamelCase ,fpaa=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase ,generator=__lowerCamelCase ,sample_posterior=__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ : Any = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = self.get_sd_vae_model()
lowerCAmelCase__ : Dict = self.get_sd_image(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(__lowerCamelCase ).sample
assert sample.shape == image.shape
lowerCAmelCase__ : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.get_sd_vae_model()
lowerCAmelCase__ : Union[str, Any] = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ : str = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
lowerCAmelCase__ : Any = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : Dict = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ : Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ : Union[str, Any] = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_sd_vae_model(fpaa=__lowerCamelCase )
lowerCAmelCase__ : str = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Any = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_sd_vae_model()
lowerCAmelCase__ : Dict = self.get_sd_image(__lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model.decode(__lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model.decode(__lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.get_sd_vae_model()
lowerCAmelCase__ : Tuple = self.get_sd_image(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.get_generator(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model.encode(__lowerCamelCase ).latent_dist
lowerCAmelCase__ : int = dist.sample(generator=__lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowerCAmelCase__ : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu()
lowerCAmelCase__ : int = torch.tensor(__lowerCamelCase )
lowerCAmelCase__ : Any = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,atol=__lowerCamelCase )
| 90 | 1 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
SCREAMING_SNAKE_CASE_ = 6_37_81_37.0
SCREAMING_SNAKE_CASE_ = 6_35_67_52.31_42_45
SCREAMING_SNAKE_CASE_ = 6_37_81_37
def UpperCamelCase__ ( _lowercase : str , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : List[str] ) -> Optional[Any]:
__UpperCAmelCase: List[str] = (AXIS_A - AXIS_B) / AXIS_A
__UpperCAmelCase: Dict = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
__UpperCAmelCase: Dict = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
__UpperCAmelCase: Union[str, Any] = radians(lowerCAmelCase__ )
__UpperCAmelCase: Optional[int] = radians(lowerCAmelCase__ )
# Equation
__UpperCAmelCase: Optional[Any] = sin((phi_a - phi_a) / 2 )
__UpperCAmelCase: str = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCAmelCase: Optional[Any] = sqrt(sin_sq_phi + (cos(lowerCAmelCase__ ) * cos(lowerCAmelCase__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 523 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase : int =logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case__ ):
def __init__( self : Tuple , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ):
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 364 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
lowerCamelCase : List[Any] = '''resnet'''
lowerCamelCase : List[str] = ['''basic''', '''bottleneck''']
def __init__( self : Tuple , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : List[str]=6_4 , UpperCAmelCase__ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCAmelCase__ : Any=[3, 4, 6, 3] , UpperCAmelCase__ : List[Any]="bottleneck" , UpperCAmelCase__ : Any="relu" , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : List[Any] , ) -> int:
super().__init__(**UpperCAmelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
lowerCAmelCase = num_channels
lowerCAmelCase = embedding_size
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = layer_type
lowerCAmelCase = hidden_act
lowerCAmelCase = downsample_in_first_stage
lowerCAmelCase = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Tuple ) -> float:
return 1E-3
| 716 |
'''simple docstring'''
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Tuple ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
lowerCAmelCase = (boundary[1] - boundary[0]) / steps
lowerCAmelCase = boundary[0]
lowerCAmelCase = boundary[1]
lowerCAmelCase = make_points(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 0.0
y += (h / 2.0) * f(lowerCamelCase )
for i in x_i:
# print(i)
y += h * f(lowerCamelCase )
y += (h / 2.0) * f(lowerCamelCase )
return y
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ):
lowerCAmelCase = a + h
while x < (b - h):
yield x
lowerCAmelCase = x + h
def a_ ( lowerCamelCase : Optional[Any] ): # enter your function here
lowerCAmelCase = (x - 0) * (x - 0)
return y
def a_ ( ):
lowerCAmelCase = 0.0 # Lower bound of integration
lowerCAmelCase = 1.0 # Upper bound of integration
lowerCAmelCase = 10.0 # define number of steps or resolution
lowerCAmelCase = [a, b] # define boundary of integration
lowerCAmelCase = method_a(lowerCamelCase , lowerCamelCase )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
| 513 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_A: Tuple = True
from torch.cuda.amp import autocast
_A: List[str] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
_A : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_A : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_A : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
_A : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to log verbose messages or not."""} , )
_A : Optional[float] = field(
default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
_A : Optional[float] = field(
default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
_A : Optional[float] = field(
default=0.99_9995 , metadata={"""help""": """Decay of gumbel temperature during training."""} )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Tuple:
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__UpperCAmelCase = logging.WARNING
if model_args.verbose_logging:
__UpperCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__UpperCAmelCase = logging.INFO
logger.setLevel(_lowerCAmelCase )
@dataclass
class UpperCAmelCase :
_A : str = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
_A : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_A : Optional[str] = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
_A : Optional[str] = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
_A : Optional[str] = field(
default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , )
_A : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
_A : Optional[int] = field(
default=1 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
_A : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
_A : Optional[float] = field(
default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class UpperCAmelCase :
_A : WavaVecaForPreTraining
_A : WavaVecaFeatureExtractor
_A : Union[bool, str] = "longest"
_A : Optional[int] = None
_A : Optional[int] = None
def __call__( self , __A ):
# reformat list to dict and set to pytorch format
__UpperCAmelCase = self.feature_extractor.pad(
__A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
__UpperCAmelCase = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
__UpperCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
__UpperCAmelCase = 1
__UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__UpperCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__A , min_masks=2 , )
return batch
class UpperCAmelCase ( UpperCAmelCase_ ):
def __init__( self , *__A , __A=1 , __A=0 , __A=1.0 , **__A ):
super().__init__(*__A , **__A )
__UpperCAmelCase = 0
__UpperCAmelCase = max_gumbel_temp
__UpperCAmelCase = min_gumbel_temp
__UpperCAmelCase = gumbel_temp_decay
def __lowerCamelCase ( self , __A , __A ):
model.train()
__UpperCAmelCase = self._prepare_inputs(__A )
if self.use_amp:
with autocast():
__UpperCAmelCase = self.compute_loss(__A , __A )
else:
__UpperCAmelCase = self.compute_loss(__A , __A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCAmelCase = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
__UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__A ).backward()
elif self.use_apex:
with amp.scale_loss(__A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def _lowerCAmelCase ( )-> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_lowerCAmelCase , _lowerCAmelCase )
# Downloading and loading a dataset from the hub.
__UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
__UpperCAmelCase = DatasetDict()
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
__UpperCAmelCase = DatasetDict()
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCAmelCase )
def prepare_dataset(_lowerCAmelCase ):
# check that all files have the correct sampling rate
__UpperCAmelCase , __UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__UpperCAmelCase = datasets.map(
_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
__UpperCAmelCase = vectorized_datasets.filter(
lambda _lowerCAmelCase : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCAmelCase ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__UpperCAmelCase = vectorized_datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__UpperCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
__UpperCAmelCase = WavaVecaForPreTraining(_lowerCAmelCase )
__UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
__UpperCAmelCase = WavaVecaPreTrainer(
model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=_lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 126 |
'''simple docstring'''
from typing import Any
import numpy as np
def _lowerCAmelCase ( _lowerCAmelCase )-> bool:
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Any:
__UpperCAmelCase = v.conjugate().T
__UpperCAmelCase = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def _lowerCAmelCase ( )-> None:
__UpperCAmelCase = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__UpperCAmelCase = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), F'{a} is not hermitian.'
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
__UpperCAmelCase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), F'{a} is not hermitian.'
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 126 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( _UpperCamelCase ):
UpperCAmelCase : str
UpperCAmelCase : int
def __UpperCAmelCase ( UpperCAmelCase_ : Any ) -> list[str]:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(lowercase_ ) )]
def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
__snake_case : str = all_rotations(lowercase_ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
__snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowercase_ ),
}
return response
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ) -> str:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
__snake_case : str = int(lowercase_ )
except ValueError:
raise TypeError(
'The parameter idx_original_string type must be int or passive'
' of cast to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(lowercase_ ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
__snake_case : Dict = [""] * len(lowercase_ )
for _ in range(len(lowercase_ ) ):
for i in range(len(lowercase_ ) ):
__snake_case : str = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_a : Optional[Any]= "Provide a string that I will generate its BWT transform: "
_a : Tuple= input(entry_msg).strip()
_a : int= bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
_a : Union[str, Any]= reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
| 707 | """simple docstring"""
from typing import Any
class UpperCamelCase :
def __init__(self : List[str] , _A : Any) -> int:
__snake_case : Any = data
__snake_case : Dict = None
def __repr__(self : Tuple) -> str:
return f"Node({self.data})"
class UpperCamelCase :
def __init__(self : Union[str, Any]) -> Union[str, Any]:
__snake_case : Any = None
def __iter__(self : Tuple) -> Any:
__snake_case : List[str] = self.head
while node:
yield node.data
__snake_case : Any = node.next
def __len__(self : str) -> int:
return sum(1 for _ in self)
def __repr__(self : int) -> str:
return "->".join([str(_A) for item in self])
def __getitem__(self : List[Any] , _A : int) -> Any:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__(self : int , _A : int , _A : Any) -> None:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
__snake_case : Optional[int] = self.head
for _ in range(_A):
__snake_case : Any = current.next
__snake_case : Dict = data
def _lowercase (self : List[Any] , _A : Any) -> None:
self.insert_nth(len(self) , _A)
def _lowercase (self : List[str] , _A : Any) -> None:
self.insert_nth(0 , _A)
def _lowercase (self : Optional[Any] , _A : int , _A : Any) -> None:
if not 0 <= index <= len(self):
raise IndexError('list index out of range')
__snake_case : str = Node(_A)
if self.head is None:
__snake_case : str = new_node
elif index == 0:
__snake_case : Union[str, Any] = self.head # link new_node to head
__snake_case : int = new_node
else:
__snake_case : Any = self.head
for _ in range(index - 1):
__snake_case : Any = temp.next
__snake_case : Dict = temp.next
__snake_case : str = new_node
def _lowercase (self : Optional[int]) -> None: # print every node data
print(self)
def _lowercase (self : Optional[Any]) -> Any:
return self.delete_nth(0)
def _lowercase (self : List[str]) -> Any: # delete from tail
return self.delete_nth(len(self) - 1)
def _lowercase (self : int , _A : int = 0) -> Any:
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('List index out of range.')
__snake_case : int = self.head # default first node
if index == 0:
__snake_case : Any = self.head.next
else:
__snake_case : List[Any] = self.head
for _ in range(index - 1):
__snake_case : List[str] = temp.next
__snake_case : Union[str, Any] = temp.next
__snake_case : str = temp.next.next
return delete_node.data
def _lowercase (self : str) -> bool:
return self.head is None
def _lowercase (self : Tuple) -> None:
__snake_case : List[Any] = None
__snake_case : Optional[Any] = self.head
while current:
# Store the current node's next node.
__snake_case : List[str] = current.next
# Make the current node's next point backwards
__snake_case : Optional[Any] = prev
# Make the previous node be the current node
__snake_case : Optional[Any] = current
# Make the current node the next node (to progress iteration)
__snake_case : Any = next_node
# Return prev in order to put the head at the end
__snake_case : Optional[Any] = prev
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
__snake_case : Union[str, Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCAmelCase_ ) == i
linked_list.insert_nth(UpperCAmelCase_ , i + 1 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCAmelCase_ ) == 9
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__snake_case : Tuple = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(-8 , 1 ) )
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
__snake_case : str = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__snake_case : Optional[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(UpperCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(UpperCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__snake_case : int = linked_list.delete_head()
assert result == -9
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__snake_case : Any = linked_list.delete_tail()
assert result == 12.2
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__snake_case : Tuple = linked_list.delete_nth(10 )
assert result is None
assert (
str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(UpperCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(UpperCAmelCase_ )
assert (
str(UpperCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(UpperCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCAmelCase ( ) -> List[Any]:
'''simple docstring'''
from doctest import testmod
testmod()
__snake_case : int = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(UpperCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
__snake_case : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(UpperCAmelCase_ )
print(F"length of linked_list is : {len(UpperCAmelCase_ )}" )
if __name__ == "__main__":
main()
| 192 | 0 |
def __a ( __UpperCAmelCase ):
a__ = len(__UpperCAmelCase )
a__ = len(matrix[0] )
a__ = min(__UpperCAmelCase , __UpperCAmelCase )
for row in range(__UpperCAmelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , __UpperCAmelCase ):
a__ = matrix[col][row] / matrix[row][row]
for i in range(__UpperCAmelCase , __UpperCAmelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
a__ = True
for i in range(row + 1 , __UpperCAmelCase ):
if matrix[i][row] != 0:
a__ , a__ = matrix[i], matrix[row]
a__ = False
break
if reduce:
rank -= 1
for i in range(__UpperCAmelCase ):
a__ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
a_ : Any = logging.get_logger(__name__)
a_ : str = TypeVar('DatasetType', Dataset, IterableDataset)
def __a ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(__UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCAmelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCAmelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCAmelCase ).__name__}." )
if i == 0:
a__ , a__ = (
(Dataset, IterableDataset) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , info=__UpperCAmelCase , split=__UpperCAmelCase , stopping_strategy=__UpperCAmelCase )
else:
return _interleave_iterable_datasets(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , info=__UpperCAmelCase , split=__UpperCAmelCase , stopping_strategy=__UpperCAmelCase )
def __a ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(__UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCAmelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCAmelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCAmelCase ).__name__}." )
if i == 0:
a__ , a__ = (
(Dataset, IterableDataset) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCAmelCase , info=__UpperCAmelCase , split=__UpperCAmelCase , axis=__UpperCAmelCase )
else:
return _concatenate_iterable_datasets(__UpperCAmelCase , info=__UpperCAmelCase , split=__UpperCAmelCase , axis=__UpperCAmelCase )
| 194 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowercase ( lowercase_ ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase_ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase_ , """depth_multiplier""" ) )
class __lowercase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]=13 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : Dict=0.25 , UpperCamelCase_ : Optional[int]=8 , UpperCamelCase_ : Dict=8 , UpperCamelCase_ : List[str]=6 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]="relu6" , UpperCamelCase_ : int=1_280 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=10 , UpperCamelCase_ : Any=None , ):
"""simple docstring"""
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = depth_multiplier
__A = depth_divisible_by
__A = min_depth
__A = expand_ratio
__A = tf_padding
__A = output_stride
__A = first_layer_is_expansion
__A = finegrained_output
__A = hidden_act
__A = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__A = classifier_dropout_prob
__A = use_labels
__A = is_training
__A = num_labels
__A = initializer_range
__A = scope
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.num_labels )
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
__A = MobileNetVaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] ):
"""simple docstring"""
__A = self.num_labels
__A = MobileNetVaForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int ):
"""simple docstring"""
__A = self.num_labels
__A = MobileNetVaForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__A = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__A = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = MobileNetVaModelTester(self )
__A = MobileNetVaConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(UpperCamelCase_ )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ):
__A = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
__A = outputs.hidden_states
__A = 16
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
@slow
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = MobileNetVaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
__A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(UpperCamelCase_ )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
__A = model(**UpperCamelCase_ )
# verify the logits
__A = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__A = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
__A = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__A = model.to(UpperCamelCase_ )
__A = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__A = prepare_img()
__A = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
__A = model(**UpperCamelCase_ )
__A = outputs.logits
# verify the logits
__A = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCamelCase_ )
__A = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 ) )
| 199 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a : Optional[int] = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__a : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 199 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger(__name__)
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_lowerCAmelCase = [144, 192, 240]
_lowerCAmelCase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_lowerCAmelCase = [96, 120, 144]
_lowerCAmelCase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_lowerCAmelCase = [64, 80, 96]
_lowerCAmelCase = [16, 16, 24, 48, 64, 80, 320]
_lowerCAmelCase = 0.05
_lowerCAmelCase = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 21
_lowerCAmelCase = 'pascal-voc-id2label.json'
else:
_lowerCAmelCase = 1_000
_lowerCAmelCase = 'imagenet-1k-id2label.json'
_lowerCAmelCase = 'huggingface/label-files'
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( snake_case , snake_case=False ):
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
_lowerCAmelCase = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
_lowerCAmelCase = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_lowerCAmelCase = name.replace('.block.' , '.' )
if "exp_1x1" in name:
_lowerCAmelCase = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_lowerCAmelCase = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_lowerCAmelCase = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_lowerCAmelCase = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_lowerCAmelCase = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
_lowerCAmelCase = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_lowerCAmelCase = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_lowerCAmelCase = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.weight' , '.layernorm.weight' )
if F'.global_rep.{i}.bias' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.bias' , '.layernorm.bias' )
if ".global_rep." in name:
_lowerCAmelCase = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_lowerCAmelCase = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_lowerCAmelCase = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_lowerCAmelCase = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_lowerCAmelCase = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_lowerCAmelCase = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_lowerCAmelCase = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_lowerCAmelCase = 'mobilevit.' + name
return name
def _lowerCamelCase ( snake_case , snake_case , snake_case=False ):
if base_model:
_lowerCAmelCase = ''
else:
_lowerCAmelCase = 'mobilevit.'
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(snake_case )
if key[:8] == "encoder.":
_lowerCAmelCase = key[8:]
if "qkv" in key:
_lowerCAmelCase = key.split('.' )
_lowerCAmelCase = int(key_split[0][6:] ) - 1
_lowerCAmelCase = int(key_split[3] )
_lowerCAmelCase = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
_lowerCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_lowerCAmelCase = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[dim : dim * 2, :]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[:dim]
_lowerCAmelCase = val[dim : dim * 2]
_lowerCAmelCase = val[-dim:]
else:
_lowerCAmelCase = val
return orig_state_dict
def _lowerCamelCase ( ):
_lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case=False ):
_lowerCAmelCase = get_mobilevit_config(snake_case )
# load original state_dict
_lowerCAmelCase = torch.load(snake_case , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = MobileViTForSemanticSegmentation(snake_case ).eval()
else:
_lowerCAmelCase = MobileViTForImageClassification(snake_case ).eval()
_lowerCAmelCase = convert_state_dict(snake_case , snake_case )
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowerCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowerCAmelCase = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowerCAmelCase = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowerCAmelCase = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_lowerCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_lowerCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_lowerCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , snake_case , atol=1E-4 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if push_to_hub:
_lowerCAmelCase = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_lowerCAmelCase = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case , organization='apple' )
model.push_to_hub(snake_case , organization='apple' )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowercase: List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 192 | from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase: Any = logging.get_logger(__name__)
# General docstring
_lowercase: List[Any] = '''RegNetConfig'''
# Base docstring
_lowercase: List[Any] = '''facebook/regnet-y-040'''
_lowercase: int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowercase: Union[str, Any] = '''facebook/regnet-y-040'''
_lowercase: Tuple = '''tabby, tabby cat'''
_lowercase: str = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : int , lowercase__ : int , lowercase__ : int = 3 , lowercase__ : int = 1 , lowercase__ : int = 1 , lowercase__ : Optional[str] = "relu" , ):
super().__init__()
_lowerCAmelCase = nn.Convad(
lowercase__ , lowercase__ , kernel_size=lowercase__ , stride=lowercase__ , padding=kernel_size // 2 , groups=lowercase__ , bias=lowercase__ , )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
_lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : str , lowercase__ : Union[str, Any] ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : Optional[int] ):
_lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_lowerCAmelCase = self.embedder(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 ):
super().__init__()
_lowerCAmelCase = nn.Convad(lowercase__ , lowercase__ , kernel_size=1 , stride=lowercase__ , bias=lowercase__ )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Tensor ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int ):
super().__init__()
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase = nn.Sequential(
nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[Any] ):
# b c h w -> b c 1 1
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = self.attention(lowercase__ )
_lowerCAmelCase = hidden_state * attention
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Any ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Any , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetSELayer(lowercase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Tuple ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 , lowercase__ : int = 2 , ):
super().__init__()
_lowerCAmelCase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase__ , lowercase__ , lowercase__ , stride=lowercase__ , ) , *[layer(lowercase__ , lowercase__ , lowercase__ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Any ):
_lowerCAmelCase = self.layers(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase__ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase__ , lowercase__ , lowercase__ , depth=lowercase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Tensor , lowercase__ : bool = False , lowercase__ : bool = True ):
_lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
_lowerCAmelCase = stage_module(lowercase__ )
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase__ , hidden_states=lowercase__ )
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ =RegNetConfig
UpperCamelCase__ ="regnet"
UpperCamelCase__ ="pixel_values"
UpperCamelCase__ =True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : List[Any] ):
if isinstance(lowercase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[str] , lowercase__ : List[Any]=False ):
if isinstance(lowercase__ , lowercase__ ):
_lowerCAmelCase = value
_lowercase: Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase: str = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : List[str] , lowercase__ : int ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = RegNetEmbeddings(lowercase__ )
_lowerCAmelCase = RegNetEncoder(lowercase__ )
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : Tensor , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None ):
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.embedder(lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__ , pooler_output=lowercase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : str , lowercase__ : Union[str, Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = RegNetModel(lowercase__ )
# classification head
_lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Optional[torch.FloatTensor] = None , lowercase__ : Optional[torch.LongTensor] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , ):
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase = 'single_label_classification'
else:
_lowerCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase = BCEWithLogitsLoss()
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
if not return_dict:
_lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase__ , logits=lowercase__ , hidden_states=outputs.hidden_states )
| 192 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowerCamelCase ( __UpperCAmelCase ):
# to overwrite at feature extractactor specific tests
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__snake_case , 'feature_size' ) )
self.assertTrue(hasattr(__snake_case , 'sampling_rate' ) )
self.assertTrue(hasattr(__snake_case , 'padding_value' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: Union[str, Any] = feat_extract.model_input_names[0]
_snake_case: Union[str, Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__snake_case ) == len(__snake_case ) for x, y in zip(__snake_case , processed_features[input_name] ) ) )
_snake_case: int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
_snake_case: List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
_snake_case: Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case: List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
_snake_case: Any = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: Union[str, Any] = feat_extract.model_input_names[0]
_snake_case: List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
_snake_case: Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
_snake_case: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: List[Any] = feat_extract.model_input_names[0]
_snake_case: Any = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
_snake_case: Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case: List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def SCREAMING_SNAKE_CASE_ ( self : Any , __snake_case : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(__snake_case : List[Any] ):
_snake_case: Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(__snake_case ) != length:
return False
return True
def _inputs_are_equal(__snake_case : Union[str, Any] , __snake_case : str ):
if len(__snake_case ) != len(__snake_case ):
return False
for input_slice_a, input_slice_a in zip(__snake_case , __snake_case ):
if not np.allclose(np.asarray(__snake_case ) , np.asarray(__snake_case ) , atol=1e-3 ):
return False
return True
_snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case )
_snake_case: Union[str, Any] = feat_extract.model_input_names[0]
_snake_case: Dict = BatchFeature({input_name: speech_inputs} )
_snake_case: Optional[int] = self.feat_extract_tester.seq_length_diff
_snake_case: Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
_snake_case: Any = self.feat_extract_tester.min_seq_length
_snake_case: Dict = self.feat_extract_tester.batch_size
_snake_case: int = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_snake_case: Optional[int] = feat_extract.pad(__snake_case , padding=__snake_case )
_snake_case: int = input_a[input_name]
_snake_case: Tuple = feat_extract.pad(__snake_case , padding='longest' )
_snake_case: Any = input_a[input_name]
_snake_case: Union[str, Any] = feat_extract.pad(__snake_case , padding='max_length' , max_length=len(speech_inputs[-1] ) )
_snake_case: Dict = input_a[input_name]
_snake_case: Any = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np' )
_snake_case: int = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='max_length' )[input_name]
_snake_case: Optional[int] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=__snake_case , return_tensors='np' )
_snake_case: Tuple = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_snake_case: Union[str, Any] = feat_extract.pad(__snake_case , pad_to_multiple_of=10 )
_snake_case: Optional[Any] = input_a[input_name]
_snake_case: Optional[int] = feat_extract.pad(__snake_case , padding='longest' , pad_to_multiple_of=10 )
_snake_case: List[Any] = input_a[input_name]
_snake_case: Tuple = feat_extract.pad(
__snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=__snake_case )
_snake_case: List[Any] = input_a[input_name]
_snake_case: Optional[int] = feat_extract.pad(
__snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=__snake_case , return_tensors='np' , )
_snake_case: Optional[int] = input_a[input_name]
self.assertTrue(all(len(__snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
_snake_case: str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_snake_case: int = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int , __snake_case : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(__snake_case : Any ):
_snake_case: Tuple = len(input[0] )
for input_slice in input[1:]:
if len(__snake_case ) != length:
return False
return True
def _inputs_are_equal(__snake_case : List[str] , __snake_case : Union[str, Any] ):
if len(__snake_case ) != len(__snake_case ):
return False
for input_slice_a, input_slice_a in zip(__snake_case , __snake_case ):
if not np.allclose(np.asarray(__snake_case ) , np.asarray(__snake_case ) , atol=1e-3 ):
return False
return True
_snake_case: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case )
_snake_case: Any = feat_extract.model_input_names[0]
_snake_case: Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_snake_case: int = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=__snake_case )
_snake_case: List[str] = input_a[input_name]
_snake_case: Any = feat_extract.pad(__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) )
_snake_case: List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
# truncate to smallest with np
_snake_case: List[str] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=__snake_case , )
_snake_case: Any = input_a[input_name]
_snake_case: List[Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
_snake_case: Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
# truncate to middle
_snake_case: str = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__snake_case , return_tensors='np' , )
_snake_case: str = input_a[input_name]
_snake_case: Any = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__snake_case )
_snake_case: List[str] = input_a[input_name]
_snake_case: Union[str, Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
_snake_case: int = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , truncation=__snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='longest' , truncation=__snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='longest' , truncation=__snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='max_length' , truncation=__snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_snake_case: Optional[int] = 12
_snake_case: int = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
_snake_case: List[str] = input_a[input_name]
_snake_case: Tuple = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__snake_case , )
_snake_case: Tuple = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_snake_case: Tuple = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_snake_case: Tuple = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
self._check_padding(numpify=__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self._check_padding(numpify=__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
self._check_truncation(numpify=__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
self._check_truncation(numpify=__snake_case )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: str = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case: List[Any] = feat_extract.model_input_names[0]
_snake_case: Optional[Any] = BatchFeature({input_name: speech_inputs} )
_snake_case: str = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np' )[input_name]
_snake_case: str = feat_extract.pad(__snake_case , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: Dict = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case: str = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case: Union[str, Any] = feat_extract.model_input_names[0]
_snake_case: Tuple = BatchFeature({input_name: speech_inputs} )
_snake_case: List[Any] = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np' )[input_name]
_snake_case: int = feat_extract.pad(__snake_case , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: Optional[int] = self.feat_extract_dict
_snake_case: Optional[Any] = True
_snake_case: Tuple = self.feature_extraction_class(**__snake_case )
_snake_case: Any = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case: List[str] = [len(__snake_case ) for x in speech_inputs]
_snake_case: Optional[Any] = feat_extract.model_input_names[0]
_snake_case: Optional[int] = BatchFeature({input_name: speech_inputs} )
_snake_case: Optional[Any] = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , __snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case: Any = self.feat_extract_dict
_snake_case: Any = True
_snake_case: str = self.feature_extraction_class(**__snake_case )
_snake_case: Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case: int = [len(__snake_case ) for x in speech_inputs]
_snake_case: Optional[Any] = feat_extract.model_input_names[0]
_snake_case: Optional[int] = BatchFeature({input_name: speech_inputs} )
_snake_case: Optional[Any] = min(__snake_case )
_snake_case: Union[str, Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=__snake_case , truncation=__snake_case , return_tensors='np' )
self.assertIn('attention_mask' , __snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 273 |
'''simple docstring'''
A : List[str] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 273 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __lowerCAmelCase ( A_ : jnp.ndarray , A_ : int , A_ : float = 1 , A_ : float = 1 , A_ : float = 1.0e4 , A_ : bool = False , A_ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
__UpperCAmelCase = float(embedding_dim // 2 )
__UpperCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
__UpperCAmelCase = min_timescale * jnp.exp(jnp.arange(A_ , dtype=jnp.floataa ) * -log_timescale_increment )
__UpperCAmelCase = jnp.expand_dims(A_ , 1 ) * jnp.expand_dims(A_ , 0 )
# scale embeddings
__UpperCAmelCase = scale * emb
if flip_sin_to_cos:
__UpperCAmelCase = jnp.concatenate([jnp.cos(A_ ), jnp.sin(A_ )] , axis=1 )
else:
__UpperCAmelCase = jnp.concatenate([jnp.sin(A_ ), jnp.cos(A_ )] , axis=1 )
__UpperCAmelCase = jnp.reshape(A_ , [jnp.shape(A_ )[0], embedding_dim] )
return signal
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self: Union[str, Any] , __lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCAmelCase )
__UpperCAmelCase = nn.silu(__lowerCAmelCase )
__UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCAmelCase )
return temb
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : float = 1
@nn.compact
def __call__( self: List[str] , __lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
return get_sinusoidal_embeddings(
__lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 221 | import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCAmelCase ( A_ : Union[str, Any] , A_ : Optional[Any]=10 ) -> Optional[int]:
__UpperCAmelCase = []
for _ in range(A_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCAmelCase ( A_ : str , A_ : List[Any]=10 ) -> List[Any]:
__UpperCAmelCase = []
for step in range(A_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = os.path.join(A_ , "schedule.bin" )
torch.save(scheduler.state_dict() , A_ )
__UpperCAmelCase = torch.load(A_ )
scheduler.load_state_dict(A_ )
return lrs
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: List[str] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Dict ) -> str:
'''simple docstring'''
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> str:
'''simple docstring'''
__UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
__UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__UpperCAmelCase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
__UpperCAmelCase = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def _UpperCAmelCase ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
__UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__UpperCAmelCase = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCAmelCase , weight_decay=0.0 , relative_step=__lowerCAmelCase , scale_parameter=__lowerCAmelCase , warmup_init=__lowerCAmelCase , )
for _ in range(1_000 ):
__UpperCAmelCase = criterion(__lowerCAmelCase , __lowerCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = nn.Linear(50 , 50 ) if is_torch_available() else None
lowerCAmelCase__ : Optional[int] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowerCAmelCase__ : Optional[int] = 10
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: List[str] , __lowerCAmelCase: Any , __lowerCAmelCase: List[Any] , __lowerCAmelCase: int=None ) -> List[Any]:
'''simple docstring'''
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase , msg=__lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__UpperCAmelCase , __UpperCAmelCase = data
__UpperCAmelCase = scheduler_func(self.optimizer , **__lowerCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__UpperCAmelCase = unwrap_schedule(__lowerCAmelCase , self.num_steps )
self.assertListAlmostEqual(
__lowerCAmelCase , __lowerCAmelCase , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
__UpperCAmelCase = scheduler_func(self.optimizer , **__lowerCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__lowerCAmelCase ) # wrap to test picklability of the schedule
__UpperCAmelCase = unwrap_and_save_reload_schedule(__lowerCAmelCase , self.num_steps )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase , msg=F'''failed for {scheduler_func} in save and reload''' )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: Union[str, Any] , __lowerCAmelCase: Tuple ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = fn
def __call__( self: int , *__lowerCAmelCase: List[str] , **__lowerCAmelCase: Any ) -> List[Any]:
'''simple docstring'''
return self.fn(*__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: List[Any] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 221 | 1 |
from __future__ import annotations
from random import choice
def __a ( __lowerCAmelCase ) -> int:
return choice(__lowerCAmelCase )
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE : str = random_pivot(__lowerCAmelCase )
# partition based on pivot
# linear time
SCREAMING_SNAKE_CASE : str = [e for e in lst if e < pivot]
SCREAMING_SNAKE_CASE : int = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__lowerCAmelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__lowerCAmelCase ) < k - 1:
return kth_number(__lowerCAmelCase , k - len(__lowerCAmelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 308 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowercase :
'''simple docstring'''
def __init__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
SCREAMING_SNAKE_CASE : int = deepcopy(snake_case )
elif os.path.exists(snake_case ):
with io.open(snake_case , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE : List[str] = json.load(snake_case )
else:
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = baseaa.urlsafe_baadecode(snake_case ).decode('utf-8' )
SCREAMING_SNAKE_CASE : Any = json.loads(snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
SCREAMING_SNAKE_CASE : Tuple = config
self.set_stage_and_offload()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_value('zero_optimization.stage' , -1 )
# offload
SCREAMING_SNAKE_CASE : int = False
if self.is_zeroa() or self.is_zeroa():
SCREAMING_SNAKE_CASE : Union[str, Any] = set(['cpu', 'nvme'] )
SCREAMING_SNAKE_CASE : Tuple = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
SCREAMING_SNAKE_CASE : List[Any] = True
def lowerCamelCase_ ( self : List[str] , snake_case : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.config
# find the config node of interest if it exists
SCREAMING_SNAKE_CASE : List[str] = ds_key_long.split('.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = nodes.pop()
for node in nodes:
SCREAMING_SNAKE_CASE : List[str] = config.get(snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowerCamelCase_ ( self : Dict , snake_case : Any , snake_case : Any=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.find_config_node(snake_case )
if config is None:
return default
return config.get(snake_case , snake_case )
def lowerCamelCase_ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Tuple=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.config
# find the config node of interest if it exists
SCREAMING_SNAKE_CASE : str = ds_key_long.split('.' )
for node in nodes:
SCREAMING_SNAKE_CASE : List[Any] = config
SCREAMING_SNAKE_CASE : List[Any] = config.get(snake_case )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(snake_case )
def lowerCamelCase_ ( self : Optional[int] , snake_case : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_value(snake_case )
return False if value is None else bool(snake_case )
def lowerCamelCase_ ( self : Dict , snake_case : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_value(snake_case )
return False if value is None else not bool(snake_case )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return self._stage == 2
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self._stage == 3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self._offload
class lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , snake_case : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = engine
def lowerCamelCase_ ( self : str , snake_case : Optional[int] , **snake_case : Any ):
'''simple docstring'''
self.engine.backward(snake_case , **snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
def __init__( self : Any , snake_case : int ):
'''simple docstring'''
super().__init__(snake_case , device_placement=snake_case , scaler=snake_case )
SCREAMING_SNAKE_CASE : Dict = hasattr(self.optimizer , 'overflow' )
def lowerCamelCase_ ( self : Optional[int] , snake_case : Optional[Any]=None ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : int , snake_case : Any ):
'''simple docstring'''
super().__init__(snake_case , snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowercase :
'''simple docstring'''
def __init__( self : Tuple , snake_case : Optional[Any] , snake_case : Any=0.001 , snake_case : Tuple=0 , **snake_case : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = params
SCREAMING_SNAKE_CASE : Optional[int] = lr
SCREAMING_SNAKE_CASE : Tuple = weight_decay
SCREAMING_SNAKE_CASE : int = kwargs
class lowercase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : int , snake_case : Optional[int]=None , snake_case : Any=0 , **snake_case : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = optimizer
SCREAMING_SNAKE_CASE : List[Any] = total_num_steps
SCREAMING_SNAKE_CASE : Optional[Any] = warmup_num_steps
SCREAMING_SNAKE_CASE : int = kwargs | 308 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
UpperCAmelCase : Optional[int] = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
UpperCAmelCase : str = "</w>"
UpperCAmelCase : List[str] = "@@ "
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = set()
lowercase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase : Dict = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Union[str, Any]="<pad>" , lowerCAmelCase_ : int="</s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = do_lower_case
with open(lowerCAmelCase_ , encoding="""utf-8""") as vocab_handle:
lowercase_ = json.load(lowerCAmelCase_)
lowercase_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''')
lowercase_ = None
lowercase_ = None
else:
with open(lowerCAmelCase_ , encoding="""utf-8""") as merges_handle:
lowercase_ = merges_handle.read().split("""\n""")[:-1]
lowercase_ = [tuple(merge.split()[:2]) for merge in merges]
lowercase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
lowercase_ = {}
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return len(self.decoder)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowercase_ = get_pairs(lowerCAmelCase_)
if not pairs:
return token
while True:
lowercase_ = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_: self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""")))
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(lowerCAmelCase_):
try:
lowercase_ = word.index(lowerCAmelCase_ , lowerCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
lowercase_ = j
if word[i] == first and i < len(lowerCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase_ = tuple(lowerCAmelCase_)
lowercase_ = new_word
if len(lowerCAmelCase_) == 1:
break
else:
lowercase_ = get_pairs(lowerCAmelCase_)
lowercase_ = """ """.join(lowerCAmelCase_)
if word == "\n " + BPE_TOKEN_MERGES:
lowercase_ = """\n""" + BPE_TOKEN_MERGES
if word.endswith(lowerCAmelCase_):
lowercase_ = word.replace(lowerCAmelCase_ , """""")
lowercase_ = word.replace(""" """ , lowerCAmelCase_)
lowercase_ = word
return word
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""")
if self.do_lower_case:
lowercase_ = text.lower()
lowercase_ = text.split()
lowercase_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCAmelCase_).split(""" """)))
return split_tokens
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : str):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token))
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = self.decoder.get(lowerCAmelCase_ , self.unk_token)
return result
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = """ """.join(lowerCAmelCase_)
# make sure @@ tokens are concatenated
lowercase_ = """""".join(string.split(lowerCAmelCase_))
return string
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_) + """\n""")
lowercase_ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""")
lowercase_ = token_index
writer.write(""" """.join(lowerCAmelCase_) + """\n""")
index += 1
return (vocab_file, merges_file)
| 567 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 42
lowercase__ = None
lowercase__ = None
UpperCAmelCase : Dict = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(__lowerCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__lowerCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__lowerCAmelCase ) != count_coins(__lowerCAmelCase ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(__lowerCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowercase_ , lowercase_ = get_distrib(node.left )
lowercase_ , lowercase_ = get_distrib(node.right )
lowercase_ = 1 - left_distrib_excess
lowercase_ = 1 - right_distrib_excess
lowercase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(__lowerCAmelCase )
+ abs(__lowerCAmelCase )
)
lowercase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__lowerCAmelCase , __lowerCAmelCase )
return get_distrib(__lowerCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 1 |
'''simple docstring'''
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
return number | (1 << position)
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
return number & ~(1 << position)
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
return number ^ (1 << position)
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
return ((number >> position) & 1) == 1
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 710 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCAmelCase :Optional[Any] = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase :List[Any] = "RegNetConfig"
# Base docstring
__UpperCAmelCase :List[Any] = "facebook/regnet-y-040"
__UpperCAmelCase :Union[str, Any] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__UpperCAmelCase :int = "facebook/regnet-y-040"
__UpperCAmelCase :Optional[Any] = "tabby, tabby cat"
__UpperCAmelCase :Dict = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , snake_case : int , snake_case : int = 3 , snake_case : int = 1 , snake_case : int = 1 , snake_case : Optional[str] = "relu" , **snake_case : Any , ) -> Union[str, Any]:
super().__init__(**snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCAmelCase : Union[str, Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCAmelCase : List[Any] = tf.keras.layers.ConvaD(
filters=snake_case , kernel_size=snake_case , strides=snake_case , padding='''VALID''' , groups=snake_case , use_bias=snake_case , name='''convolution''' , )
__UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
__UpperCAmelCase : Any = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase__ ( self : Any , snake_case : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.convolution(self.padding(snake_case ) )
__UpperCAmelCase : List[Any] = self.normalization(snake_case )
__UpperCAmelCase : Optional[Any] = self.activation(snake_case )
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , snake_case : RegNetConfig , **snake_case : Tuple ) -> int:
super().__init__(**snake_case )
__UpperCAmelCase : List[str] = config.num_channels
__UpperCAmelCase : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Dict ) -> int:
__UpperCAmelCase : int = shape_list(snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCAmelCase : Dict = tf.transpose(snake_case , perm=(0, 2, 3, 1) )
__UpperCAmelCase : List[str] = self.embedder(snake_case )
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , snake_case : int , snake_case : int = 2 , **snake_case : Tuple ) -> str:
super().__init__(**snake_case )
__UpperCAmelCase : str = tf.keras.layers.ConvaD(
filters=snake_case , kernel_size=1 , strides=snake_case , use_bias=snake_case , name='''convolution''' )
__UpperCAmelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def lowerCamelCase__ ( self : str , snake_case : tf.Tensor , snake_case : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(snake_case ) , training=snake_case )
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple , snake_case : int , snake_case : int , **snake_case : Tuple ) -> List[Any]:
super().__init__(**snake_case )
__UpperCAmelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case , name='''pooler''' )
__UpperCAmelCase : Dict = [
tf.keras.layers.ConvaD(filters=snake_case , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=snake_case , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def lowerCamelCase__ ( self : Optional[int] , snake_case : Tuple ) -> Union[str, Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCAmelCase : str = self.pooler(snake_case )
for layer_module in self.attention:
__UpperCAmelCase : int = layer_module(snake_case )
__UpperCAmelCase : List[Any] = hidden_state * pooled
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 1 , **snake_case : int ) -> int:
super().__init__(**snake_case )
__UpperCAmelCase : Any = in_channels != out_channels or stride != 1
__UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Optional[int] = (
TFRegNetShortCut(snake_case , stride=snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
snake_case , stride=snake_case , groups=snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=snake_case , name='''layer.2''' ),
]
__UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : Optional[Any] ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : Any = layer_module(snake_case )
__UpperCAmelCase : Tuple = self.shortcut(snake_case )
hidden_state += residual
__UpperCAmelCase : Optional[int] = self.activation(snake_case )
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[str] , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 1 , **snake_case : List[str] ) -> Optional[int]:
super().__init__(**snake_case )
__UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
__UpperCAmelCase : Optional[Any] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Any = (
TFRegNetShortCut(snake_case , stride=snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__UpperCAmelCase : List[str] = [
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
snake_case , stride=snake_case , groups=snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(snake_case , kernel_size=1 , activation=snake_case , name='''layer.3''' ),
]
__UpperCAmelCase : Dict = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : Any = layer_module(snake_case )
__UpperCAmelCase : int = self.shortcut(snake_case )
hidden_state += residual
__UpperCAmelCase : Optional[int] = self.activation(snake_case )
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : RegNetConfig , snake_case : int , snake_case : int , snake_case : int = 2 , snake_case : int = 2 , **snake_case : str ) -> Optional[Any]:
super().__init__(**snake_case )
__UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__UpperCAmelCase : str = [
# downsampling is done in the first layer with stride of 2
layer(snake_case , snake_case , snake_case , stride=snake_case , name='''layers.0''' ),
*[layer(snake_case , snake_case , snake_case , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCamelCase__ ( self : List[str] , snake_case : Any ) -> List[Any]:
for layer_module in self.layers:
__UpperCAmelCase : Optional[Any] = layer_module(snake_case )
return hidden_state
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , snake_case : RegNetConfig , **snake_case : int ) -> str:
super().__init__(**snake_case )
__UpperCAmelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__UpperCAmelCase : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case , snake_case , snake_case , depth=snake_case , name=f'stages.{i+1}' ) )
def lowerCamelCase__ ( self : int , snake_case : tf.Tensor , snake_case : bool = False , snake_case : bool = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCAmelCase : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase : Any = hidden_states + (hidden_state,)
__UpperCAmelCase : List[Any] = stage_module(snake_case )
if output_hidden_states:
__UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case , hidden_states=snake_case )
@keras_serializable
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = RegNetConfig
def __init__( self : Dict , snake_case : str , **snake_case : Optional[int] ) -> Any:
super().__init__(**snake_case )
__UpperCAmelCase : List[Any] = config
__UpperCAmelCase : List[str] = TFRegNetEmbeddings(snake_case , name='''embedder''' )
__UpperCAmelCase : List[str] = TFRegNetEncoder(snake_case , name='''encoder''' )
__UpperCAmelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case , name='''pooler''' )
@unpack_inputs
def lowerCamelCase__ ( self : Dict , snake_case : tf.Tensor , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[Any] = self.embedder(snake_case , training=snake_case )
__UpperCAmelCase : Optional[int] = self.encoder(
snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case )
__UpperCAmelCase : List[str] = encoder_outputs[0]
__UpperCAmelCase : str = self.pooler(snake_case )
# Change to NCHW output format have uniformity in the modules
__UpperCAmelCase : Optional[Any] = tf.transpose(snake_case , perm=(0, 3, 1, 2) )
__UpperCAmelCase : str = tf.transpose(snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCAmelCase : Dict = tuple([tf.transpose(snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = RegNetConfig
SCREAMING_SNAKE_CASE : Tuple = "regnet"
SCREAMING_SNAKE_CASE : List[Any] = "pixel_values"
@property
def lowerCamelCase__ ( self : int ) -> List[str]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__UpperCAmelCase :Optional[int] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase :List[Any] = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _a , )
class a ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , snake_case : RegNetConfig , *snake_case : Optional[int] , **snake_case : List[str] ) -> Tuple:
super().__init__(snake_case , *snake_case , **snake_case )
__UpperCAmelCase : Dict = TFRegNetMainLayer(snake_case , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase__ ( self : Tuple , snake_case : tf.Tensor , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : str=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCAmelCase : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Dict = self.regnet(
pixel_values=snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _a , )
class a ( _a , _a ):
"""simple docstring"""
def __init__( self : Tuple , snake_case : RegNetConfig , *snake_case : Optional[Any] , **snake_case : List[Any] ) -> List[Any]:
super().__init__(snake_case , *snake_case , **snake_case )
__UpperCAmelCase : List[Any] = config.num_labels
__UpperCAmelCase : Optional[int] = TFRegNetMainLayer(snake_case , name='''regnet''' )
# classification head
__UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase__ ( self : Tuple , snake_case : tf.Tensor = None , snake_case : tf.Tensor = None , snake_case : bool = None , snake_case : bool = None , snake_case : Tuple=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[int] = self.regnet(
snake_case , output_hidden_states=snake_case , return_dict=snake_case , training=snake_case )
__UpperCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase : Tuple = self.classifier[0](snake_case )
__UpperCAmelCase : Tuple = self.classifier[1](snake_case )
__UpperCAmelCase : Any = None if labels is None else self.hf_compute_loss(labels=snake_case , logits=snake_case )
if not return_dict:
__UpperCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) | 266 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names ) | 52 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : List[Any] = text, pattern
__a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ):
# searches pattern in text and returns index positions
__a : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__a : List[str] = self.mismatch_in_text(_UpperCAmelCase )
if mismatch_index == -1:
positions.append(_UpperCAmelCase )
else:
__a : Tuple = self.match_in_pattern(self.text[mismatch_index] )
__a : Optional[int] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A = '''ABAABA'''
A = '''AB'''
A = BoyerMooreSearch(text, pattern)
A = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions) | 52 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : Union[str, Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : Dict = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : List[str] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : str = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : int = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
lowercase__ : Dict = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 261 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase =logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
lowercase__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
lowercase__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCAmelCase__ :
lowercase__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
lowercase__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
lowercase__ : int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __a ( ) -> Dict:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
A__ = import_module("tasks" )
try:
A__ = getattr(A , model_args.task_type )
A__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
A__ = token_classification_task.get_labels(data_args.labels )
A__ = dict(enumerate(A ) )
A__ = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
A__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A , A ) -> Tuple[List[int], List[int]]:
A__ = np.argmax(A , axis=2 )
A__ , A__ = preds.shape
A__ = [[] for _ in range(A )]
A__ = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A ) -> Dict:
A__ , A__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
A__ = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A__ = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , A , A )
writer.write("%s = %s\n" % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
A__ = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
A__ , A__ , A__ = trainer.predict(A )
A__ , A__ = align_predictions(A , A )
A__ = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , A , A )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
A__ = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def __a ( A ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main() | 261 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase : Any = "hf-internal-testing/tiny-random-bert"
lowerCamelCase : Optional[int] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCamelCase : str = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = cached_file(A_ , A_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
lowerCamelCase_ = f.read()
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
self.assertTrue(os.path.isfile(A_ ) )
# File is cached at the same place the second time.
lowerCamelCase_ = cached_file(A_ , A_ )
self.assertEqual(A_ , A_ )
# Using a specific revision to test the full commit hash.
lowerCamelCase_ = cached_file(A_ , A_ , revision='9b8c223' )
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
lowerCamelCase_ = cached_file('tiny-random-bert' , A_ )
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
lowerCamelCase_ = cached_file(A_ , A_ , revision='aaaa' )
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
lowerCamelCase_ = cached_file(A_ , 'conf' )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
lowerCamelCase_ = cached_file(A_ , 'conf' )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
lowerCamelCase_ = f.read()
self.assertTrue(os.path.isfile(os.path.join(A_ , '.no_exist' , A_ , 'conf' ) ) )
lowerCamelCase_ = cached_file(A_ , 'conf' , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
lowerCamelCase_ = cached_file(A_ , 'conf' , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
lowerCamelCase_ = mock.Mock()
lowerCamelCase_ = 500
lowerCamelCase_ = {}
lowerCamelCase_ = HTTPError
lowerCamelCase_ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
lowerCamelCase_ = cached_file(A_ , 'conf' , _raise_exceptions_for_connection_errors=A_ )
self.assertIsNone(A_ )
# This check we did call the fake head request
mock_head.assert_called()
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , A_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , A_ , revision='ahaha' )
lowerCamelCase_ = get_file_from_repo('bert-base-cased' , A_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCamelCase_ = json.loads(open(A_ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 768 )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = Path(A_ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(A_ , 'a.txt' ) , str(A_ ) )
self.assertIsNone(get_file_from_repo(A_ , 'b.txt' ) )
| 70 |
from __future__ import annotations
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list[int]:
lowercase : int = [True] * limit
lowercase : Tuple = False
lowercase : List[Any] = False
lowercase : Union[str, Any] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowercase : Tuple = i * 2
while index < limit:
lowercase : Optional[int] = False
lowercase : Optional[int] = index + i
lowercase : int = [2]
for i in range(3 , SCREAMING_SNAKE_CASE__ , 2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE__ )
return primes
def _snake_case( SCREAMING_SNAKE_CASE__ = 1_000_000 ) -> int:
lowercase : int = prime_sieve(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = 0
lowercase : Dict = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + length , len(SCREAMING_SNAKE_CASE__ ) ):
lowercase : Optional[int] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowercase : Any = j - i
lowercase : int = sol
return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 336 | 0 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
_lowercase : Tuple = b * b - 4 * a * c
_lowercase : List[Any] = (-b + sqrt(_lowercase )) / (2 * a)
_lowercase : Dict = (-b - sqrt(_lowercase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __UpperCamelCase ( ) -> List[Any]:
_lowercase , _lowercase : Optional[Any] = quadratic_roots(a=5, b=6, c=1 )
print(f'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> int:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__SCREAMING_SNAKE_CASE = []
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class __a ( _snake_case, _snake_case ):
__UpperCamelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : Union[str, Any] = 2
@register_to_config
def __init__( self : Union[str, Any] ,lowerCamelCase : int = 1000 ,lowerCamelCase : float = 0.00_085 ,lowerCamelCase : float = 0.012 ,lowerCamelCase : str = "linear" ,lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase : str = "epsilon" ,lowerCamelCase : str = "linspace" ,lowerCamelCase : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
__SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase ,dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE = torch.linspace(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
__SCREAMING_SNAKE_CASE = 1.0 - self.betas
__SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ,lowerCamelCase : int=None ):
'''simple docstring'''
if schedule_timesteps is None:
__SCREAMING_SNAKE_CASE = self.timesteps
__SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__SCREAMING_SNAKE_CASE = 1 if len(lowerCamelCase ) > 1 else 0
else:
__SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
__SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase )
if self.state_in_first_order:
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
else:
__SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
__SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Union[str, torch.device] = None ,lowerCamelCase : Optional[int] = None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = num_inference_steps
__SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__SCREAMING_SNAKE_CASE = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase ,dtype=lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE = (np.arange(lowerCamelCase ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(lowerCamelCase ) ).to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = np.interp(lowerCamelCase ,np.arange(0 ,len(lowerCamelCase ) ) ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase )
# interpolate sigmas
__SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
__SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__SCREAMING_SNAKE_CASE = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase ).startswith("""mps""" ):
# mps does not support float64
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase ,dtype=torch.floataa )
else:
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase )
# interpolate timesteps
__SCREAMING_SNAKE_CASE = self.sigma_to_t(lowerCamelCase ).to(lowerCamelCase ,dtype=timesteps.dtype )
__SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
__SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps] )
__SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = sigma.log()
# get distribution
__SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__SCREAMING_SNAKE_CASE = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__SCREAMING_SNAKE_CASE = low_idx + 1
__SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx]
__SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx]
# interpolate sigmas
__SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
__SCREAMING_SNAKE_CASE = w.clamp(0 ,1 )
# transform interpolation to time range
__SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
__SCREAMING_SNAKE_CASE = t.view(sigma.shape )
return t
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.sample is None
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : Union[float, torch.FloatTensor] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : bool = True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase )
# advance index counter by 1
__SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
__SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1]
__SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
__SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
__SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
__SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat
# store for 2nd order step
__SCREAMING_SNAKE_CASE = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
__SCREAMING_SNAKE_CASE = self.sample
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase ):
# mps does not support float64
__SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
__SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
__SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCamelCase ,lowerCamelCase ) for t in timesteps]
__SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 )
__SCREAMING_SNAKE_CASE = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 109 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__a = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __snake_case( _lowerCAmelCase=True ) -> Dict:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a ) )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = None
lowercase = None
def lowerCamelCase ( self : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
with TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = dataset_module_factory(snake_case_ , cache_dir=snake_case_ )
snake_case__ : Optional[int] = import_main_class(dataset_module.module_path , dataset=snake_case_ )
snake_case__ : DatasetBuilder = builder_cls(
cache_dir=snake_case_ , config_name=snake_case_ , hash=dataset_module.hash , )
snake_case__ : Dict = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=snake_case_ ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
snake_case__ : List[str] = cached_path(snake_case_ , cache_dir=snake_case_ )
self.assertTrue(os.path.exists(snake_case_ ) )
@pytest.mark.integration
def __snake_case( _lowerCAmelCase ) -> Tuple:
snake_case__ : Optional[int] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
snake_case__ : Dict = dataset_module_factory("""wikipedia""" , cache_dir=_lowerCAmelCase )
snake_case__ : Dict = import_main_class(dataset_module.module_path )
snake_case__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
snake_case__ : Any = None
builder_instance.download_and_prepare()
snake_case__ : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __snake_case( _lowerCAmelCase ) -> List[str]:
snake_case__ : Optional[int] = dataset_module_factory("""wikipedia""" , cache_dir=_lowerCAmelCase )
snake_case__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
snake_case__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
snake_case__ : Any = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds["""train"""] , _lowerCAmelCase )
assert next(iter(ds["""train"""] ) )
| 374 | 0 |
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = 10_00 ) -> int:
"""simple docstring"""
__snake_case , __snake_case = 1, 1
__snake_case = []
for i in range(1 , n + 1 ):
__snake_case = prev_numerator + 2 * prev_denominator
__snake_case = prev_numerator + prev_denominator
if len(str(SCREAMING_SNAKE_CASE ) ) > len(str(SCREAMING_SNAKE_CASE ) ):
result.append(SCREAMING_SNAKE_CASE )
__snake_case = numerator
__snake_case = denominator
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 614 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
__snake_case = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__snake_case = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__snake_case = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__snake_case = 4
__snake_case = True
# hparam_utils.py hparams
__snake_case = 0.664_694
__snake_case = 0.207_951
__snake_case = 0.121_194
__snake_case = True
__snake_case = True
__snake_case = False
__snake_case = 0.0_352_513
__snake_case = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__snake_case = 4
__snake_case = False
# hparam_utils.py hparams
__snake_case = 36.4_519
__snake_case = 0.903_421
__snake_case = 222.088
__snake_case = True
__snake_case = True
__snake_case = True
__snake_case = 0.763_141
__snake_case = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__snake_case = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__snake_case = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__snake_case = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''Task {task} not supported.''' )
print(F'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F'''Save tokenizer files to {pytorch_dump_path}''' )
__snake_case = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=5_12 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 614 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 64 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
snake_case__ : str = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
_a = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
_a = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_a = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_a = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase_ : Optional[Any] = self.task_name.lower()
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "train"
_a = "dev"
_a = "test"
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = 42
_a = 42
_a = 42
def __init__( self : Any , __a : GlueDataTrainingArguments , __a : PreTrainedTokenizerBase , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[str] = None , ) ->Optional[int]:
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , __a , )
lowerCamelCase_ : Optional[int] = args
lowerCamelCase_ : Tuple = glue_processors[args.task_name]()
lowerCamelCase_ : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(__a , __a ):
try:
lowerCamelCase_ : List[Any] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
lowerCamelCase_ : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
lowerCamelCase_ : Any = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_, lowerCamelCase_ : int = label_list[2], label_list[1]
lowerCamelCase_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ : List[Any] = cached_features_file + """.lock"""
with FileLock(__a ):
if os.path.exists(__a ) and not args.overwrite_cache:
lowerCamelCase_ : str = time.time()
lowerCamelCase_ : int = torch.load(__a )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
lowerCamelCase_ : List[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase_ : Tuple = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase_ : List[Any] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase_ : Dict = examples[:limit_length]
lowerCamelCase_ : Union[str, Any] = glue_convert_examples_to_features(
__a , __a , max_length=args.max_seq_length , label_list=__a , output_mode=self.output_mode , )
lowerCamelCase_ : Optional[Any] = time.time()
torch.save(self.features , __a )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Any ) ->Any:
return len(self.features )
def __getitem__( self : List[Any] , __a : Optional[int] ) ->InputFeatures:
return self.features[i]
def _lowerCAmelCase ( self : int ) ->Optional[int]:
return self.label_list
| 278 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Optional[int]= logging.get_logger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class __lowerCamelCase ( _a ):
a : Tuple =["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = True , snake_case_ = 1 / 255 , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> None:
super().__init__(**snake_case_ )
UpperCamelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = offset
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" in size:
UpperCamelCase__ = get_resize_output_image_size(snake_case_ , size['shortest_edge'] , default_to_square=snake_case_ )
elif "height" in size and "width" in size:
UpperCamelCase__ = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size['height'], size['width']) , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> str:
UpperCamelCase__ = image.astype(np.floataa )
if offset:
UpperCamelCase__ = image - (scale / 2)
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
UpperCamelCase__ = to_numpy_array(snake_case_ )
if do_resize:
UpperCamelCase__ = self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ )
if do_center_crop:
UpperCamelCase__ = self.center_crop(snake_case_ , size=snake_case_ )
if do_rescale:
UpperCamelCase__ = self.rescale(image=snake_case_ , scale=snake_case_ , offset=snake_case_ )
if do_normalize:
UpperCamelCase__ = self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ )
UpperCamelCase__ = to_channel_dimension_format(snake_case_ , snake_case_ )
return image
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> PIL.Image.Image:
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = offset if offset is not None else self.offset
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase__ = make_batched(snake_case_ )
UpperCamelCase__ = [
[
self._preprocess_image(
image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , offset=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , )
for img in video
]
for video in videos
]
UpperCamelCase__ = {'pixel_values': videos}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 1 |
def lowerCAmelCase_ ( __lowerCamelCase = 1_0_0_0_0_0_0 ):
__snake_case : Any = 1
__snake_case : Any = 1
__snake_case : Union[str, Any] = {1: 1}
for inputa in range(2 , __lowerCamelCase ):
__snake_case : str = 0
__snake_case : List[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__snake_case : List[str] = (3 * number) + 1
counter += 1
if inputa not in counters:
__snake_case : str = counter
if counter > pre_counter:
__snake_case : List[Any] = inputa
__snake_case : Tuple = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 81 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class __A ( Generic[T] ):
'''simple docstring'''
a_ = 42 # Cache store of keys
a_ = 42 # References of the keys in cache
a_ = 10 # Maximum capacity of cache
def __init__( self , _snake_case ):
_lowerCAmelCase : Tuple = deque()
_lowerCAmelCase : List[Any] = set()
if not n:
_lowerCAmelCase : Any = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_lowerCAmelCase : List[str] = n
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_lowerCAmelCase : Optional[int] = self.dq_store.pop()
self.key_reference.remove(_snake_case )
else:
self.dq_store.remove(_snake_case )
self.dq_store.appendleft(_snake_case )
self.key_reference.add(_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
for k in self.dq_store:
print(_snake_case )
def __repr__( self ):
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 424 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=2 , snake_case__ : Any=3 , snake_case__ : str=4 , snake_case__ : Tuple=2 , snake_case__ : Optional[Any]=7 , snake_case__ : int=True , snake_case__ : Dict=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=99 , snake_case__ : str=36 , snake_case__ : Tuple=2 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=37 , snake_case__ : int="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=512 , snake_case__ : Union[str, Any]=16 , snake_case__ : Optional[int]=2 , snake_case__ : Any=0.0_2 , snake_case__ : List[str]=6 , snake_case__ : Optional[int]=6 , snake_case__ : Any=3 , snake_case__ : str=4 , snake_case__ : int=None , snake_case__ : Any=1_000 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = coordinate_size
__lowerCAmelCase = shape_size
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase = text_seq_length
__lowerCAmelCase = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase = self.text_seq_length + self.image_seq_length
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase = bbox[i, j, 3]
__lowerCAmelCase = bbox[i, j, 1]
__lowerCAmelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase = bbox[i, j, 2]
__lowerCAmelCase = bbox[i, j, 0]
__lowerCAmelCase = tmp_coordinate
__lowerCAmelCase = tf.constant(snake_case__ )
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Tuple ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMvaModel(config=snake_case__ )
# text + image
__lowerCAmelCase = model(snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
__lowerCAmelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , training=snake_case__ , )
__lowerCAmelCase = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase = model({"pixel_values": pixel_values} , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFLayoutLMvaForSequenceClassification(config=snake_case__ )
__lowerCAmelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFLayoutLMvaForTokenClassification(config=snake_case__ )
__lowerCAmelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = 2
__lowerCAmelCase = TFLayoutLMvaForQuestionAnswering(config=snake_case__ )
__lowerCAmelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) = config_and_inputs
__lowerCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class a ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase_ : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase_ : Optional[int] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase_ : Dict = False
lowercase_ : str = False
lowercase_ : Any = False
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple=False ):
"""simple docstring"""
__lowerCAmelCase = copy.deepcopy(snake_case__ )
if model_class in get_values(snake_case__ ):
__lowerCAmelCase = {
k: tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(snake_case__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(snake_case__ ):
__lowerCAmelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
__lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
__lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
__lowerCAmelCase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMvaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case__ )
if getattr(snake_case__ , "hf_compute_loss" , snake_case__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
__lowerCAmelCase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case__ )[0]
]
__lowerCAmelCase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
__lowerCAmelCase = prepared_for_class.pop("input_ids" )
__lowerCAmelCase = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
__lowerCAmelCase = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
__lowerCAmelCase = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase = -100
__lowerCAmelCase = tf.convert_to_tensor(snake_case__ )
__lowerCAmelCase = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
__lowerCAmelCase = model(snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase = inspect.signature(model.call ).parameters
__lowerCAmelCase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase = {0: "input_ids"}
for label_key in label_keys:
__lowerCAmelCase = signature_names.index(snake_case__ )
__lowerCAmelCase = label_key
__lowerCAmelCase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase = prepared_for_class[value]
__lowerCAmelCase = tuple(snake_case__ )
# Send to model
__lowerCAmelCase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
@slow
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFLayoutLMvaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="tf" ).pixel_values
__lowerCAmelCase = tf.constant([[1, 2]] )
__lowerCAmelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase = model(input_ids=snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
# verify the logits
__lowerCAmelCase = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , snake_case__ )
__lowerCAmelCase = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) )
| 376 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a ( __UpperCAmelCase ):
lowercase_ : Union[str, Any] = 'segformer'
def __init__( self : List[str] , snake_case__ : Any=3 , snake_case__ : int=4 , snake_case__ : Tuple=[2, 2, 2, 2] , snake_case__ : Optional[int]=[8, 4, 2, 1] , snake_case__ : Union[str, Any]=[32, 64, 160, 256] , snake_case__ : str=[7, 3, 3, 3] , snake_case__ : List[Any]=[4, 2, 2, 2] , snake_case__ : Tuple=[1, 2, 5, 8] , snake_case__ : List[str]=[4, 4, 4, 4] , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : str=0.1 , snake_case__ : List[Any]=0.0_2 , snake_case__ : Any=0.1 , snake_case__ : List[Any]=1E-6 , snake_case__ : Any=256 , snake_case__ : Optional[Any]=255 , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**snake_case__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , snake_case__ , )
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_encoder_blocks
__lowerCAmelCase = depths
__lowerCAmelCase = sr_ratios
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = patch_sizes
__lowerCAmelCase = strides
__lowerCAmelCase = mlp_ratios
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = decoder_hidden_size
__lowerCAmelCase = kwargs.get("reshape_last_stage" , snake_case__ )
__lowerCAmelCase = semantic_loss_ignore_index
class a ( __UpperCAmelCase ):
lowercase_ : List[str] = version.parse('1.11' )
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return 12
| 376 | 1 |
from math import pi
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : int )-> float:
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 632 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 632 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMTokenizer
_UpperCamelCase = LayoutLMTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = True
def _UpperCAmelCase ( self ):
super().setUp()
_lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _UpperCAmelCase ( self , **a__ ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **a__ )
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = '''UNwant\u00E9d,running'''
_lowerCamelCase = '''unwanted, running'''
return input_text, output_text
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.tokenizer_class(self.vocab_file )
_lowerCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(a__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
def _UpperCAmelCase ( self ):
pass
| 297 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowerCamelCase ( _a ):
"""simple docstring"""
random.seed(_a )
np.random.seed(_a )
torch.manual_seed(_a )
torch.cuda.manual_seed_all(_a )
# ^^ safe to call this function even if cuda is not available
class __magic_name__ :
"""simple docstring"""
def __init__( self , a__ , a__ = 0.9999 , a__ = 0.0 , a__ = 0 , a__ = False , a__ = 1.0 , a__ = 2 / 3 , a__ = None , a__ = None , **a__ , ):
if isinstance(a__ , torch.nn.Module ):
_lowerCamelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , a__ , standard_warn=a__ , )
_lowerCamelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase = True
if kwargs.get('''max_value''' , a__ ) is not None:
_lowerCamelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , a__ , standard_warn=a__ )
_lowerCamelCase = kwargs['''max_value''']
if kwargs.get('''min_value''' , a__ ) is not None:
_lowerCamelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , a__ , standard_warn=a__ )
_lowerCamelCase = kwargs['''min_value''']
_lowerCamelCase = list(a__ )
_lowerCamelCase = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , a__ ) is not None:
_lowerCamelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , a__ , standard_warn=a__ )
self.to(device=kwargs['''device'''] )
_lowerCamelCase = None
_lowerCamelCase = decay
_lowerCamelCase = min_decay
_lowerCamelCase = update_after_step
_lowerCamelCase = use_ema_warmup
_lowerCamelCase = inv_gamma
_lowerCamelCase = power
_lowerCamelCase = 0
_lowerCamelCase = None # set in `step()`
_lowerCamelCase = model_cls
_lowerCamelCase = model_config
@classmethod
def _UpperCAmelCase ( cls , a__ , a__ ):
_lowerCamelCase , _lowerCamelCase = model_cls.load_config(a__ , return_unused_kwargs=a__ )
_lowerCamelCase = model_cls.from_pretrained(a__ )
_lowerCamelCase = cls(model.parameters() , model_cls=a__ , model_config=model.config )
ema_model.load_state_dict(a__ )
return ema_model
def _UpperCAmelCase ( self , a__ ):
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
_lowerCamelCase = self.model_cls.from_config(self.model_config )
_lowerCamelCase = self.state_dict()
state_dict.pop('''shadow_params''' , a__ )
model.register_to_config(**a__ )
self.copy_to(model.parameters() )
model.save_pretrained(a__ )
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase = (1 + step) / (10 + step)
_lowerCamelCase = min(a__ , self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase = max(a__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _UpperCAmelCase ( self , a__ ):
if isinstance(a__ , torch.nn.Module ):
_lowerCamelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , a__ , standard_warn=a__ , )
_lowerCamelCase = parameters.parameters()
_lowerCamelCase = list(a__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase = self.get_decay(self.optimization_step )
_lowerCamelCase = decay
_lowerCamelCase = 1 - decay
_lowerCamelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , a__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase = deepspeed.zero.GatheredParameters(a__ , modifier_rank=a__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(a__ )
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = list(a__ )
for s_param, param in zip(self.shadow_params , a__ ):
param.data.copy_(s_param.to(param.device ).data )
def _UpperCAmelCase ( self , a__=None , a__=None ):
_lowerCamelCase = [
p.to(device=a__ , dtype=a__ ) if p.is_floating_point() else p.to(device=a__ )
for p in self.shadow_params
]
def _UpperCAmelCase ( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = [param.detach().cpu().clone() for param in parameters]
def _UpperCAmelCase ( self , a__ ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , a__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase = None
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = copy.deepcopy(a__ )
_lowerCamelCase = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_lowerCamelCase = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , a__ ):
raise ValueError('''Invalid min_decay''' )
_lowerCamelCase = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , a__ ):
raise ValueError('''Invalid optimization_step''' )
_lowerCamelCase = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , a__ ):
raise ValueError('''Invalid update_after_step''' )
_lowerCamelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , a__ ):
raise ValueError('''Invalid use_ema_warmup''' )
_lowerCamelCase = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_lowerCamelCase = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_lowerCamelCase = state_dict.get('''shadow_params''' , a__ )
if shadow_params is not None:
_lowerCamelCase = shadow_params
if not isinstance(self.shadow_params , a__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(a__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
| 297 | 1 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__snake_case : Tuple = logging.get_logger(__name__)
@add_end_docstrings(a )
class UpperCamelCase ( a ):
"""simple docstring"""
def __init__( self : str , *_lowerCamelCase : str , **_lowerCamelCase : Tuple ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
self.check_model_type(_lowerCamelCase )
def A__ ( self : List[Any] , _lowerCamelCase : str=None , _lowerCamelCase : Any=None , _lowerCamelCase : Union[str, Any]=None , **_lowerCamelCase : Tuple ):
A__ , A__ = {}, {}
if padding is not None:
A__ = padding
if truncation is not None:
A__ = truncation
if top_k is not None:
A__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , _lowerCamelCase : Union["Image.Image", str] , _lowerCamelCase : str = None , **_lowerCamelCase : Optional[Any] ):
if isinstance(_lowerCamelCase , (Image.Image, str) ) and isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ = {'''image''': image, '''question''': question}
else:
A__ = image
A__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
return results
def A__ ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : str=False , _lowerCamelCase : List[str]=False ):
A__ = load_image(inputs['''image'''] )
A__ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_lowerCamelCase , truncation=_lowerCamelCase )
A__ = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework )
model_inputs.update(_lowerCamelCase )
return model_inputs
def A__ ( self : int , _lowerCamelCase : int ):
A__ = self.model(**_lowerCamelCase )
return model_outputs
def A__ ( self : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=5 ):
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.sigmoid()[0]
A__ , A__ = probs.topk(_lowerCamelCase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCamelCase , _lowerCamelCase )]
| 571 |
"""simple docstring"""
def a_ ( __a ):
assert (
isinstance(__a , __a ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A__ , A__ = 1, 1
for _ in range(number_of_steps - 1 ):
A__ , A__ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 | 1 |
"""simple docstring"""
from math import ceil
def _UpperCamelCase ( UpperCamelCase = 1001 ) -> str:
"""simple docstring"""
__UpperCAmelCase : Any = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__UpperCAmelCase : int = 2 * i + 1
__UpperCAmelCase : int = 2 * i
__UpperCAmelCase : Dict = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 720 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a__ ( __magic_name__ ):
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCamelCase_ , "hidden_sizes"))
self.parent.assertTrue(hasattr(UpperCamelCase_ , "num_attention_heads"))
self.parent.assertTrue(hasattr(UpperCamelCase_ , "num_encoder_blocks"))
class a__ :
def __init__( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : str=64 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : int=[2, 2, 2, 2] , UpperCamelCase_ : int=[8, 4, 2, 1] , UpperCamelCase_ : int=[16, 32, 64, 128] , UpperCamelCase_ : Tuple=[1, 4, 8, 16] , UpperCamelCase_ : List[str]=[1, 2, 4, 8] , UpperCamelCase_ : str=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : Union[str, Any]=None , ):
"""simple docstring"""
__UpperCAmelCase : str = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : str = image_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : Any = num_encoder_blocks
__UpperCAmelCase : List[Any] = sr_ratios
__UpperCAmelCase : Optional[int] = depths
__UpperCAmelCase : Union[str, Any] = hidden_sizes
__UpperCAmelCase : Union[str, Any] = downsampling_rates
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Union[str, Any] = scope
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : List[Any] = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def a_ ( self : Any):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = SegformerModel(config=UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : List[Any] = model(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : Tuple = model(UpperCamelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
__UpperCAmelCase : int = model(UpperCamelCase_ , labels=UpperCamelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def a_ ( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = SegformerForSemanticSegmentation(config=UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : List[str] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(UpperCamelCase_)
__UpperCAmelCase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_)
self.parent.assertGreater(result.loss , 0.0)
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Dict = SegformerModelTester(self)
__UpperCAmelCase : Any = SegformerConfigTester(self , config_class=UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase_)
@unittest.skip("SegFormer does not use inputs_embeds")
def a_ ( self : Dict):
"""simple docstring"""
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
def a_ ( self : Any):
"""simple docstring"""
pass
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = model_class(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : Union[str, Any] = outputs.attentions
__UpperCAmelCase : Optional[Any] = sum(self.model_tester.depths)
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[Any] = model_class(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
# verify the first attentions (first block, first layer)
__UpperCAmelCase : Any = (self.model_tester.image_size // 4) ** 2
__UpperCAmelCase : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__UpperCAmelCase : int = (self.model_tester.image_size // 32) ** 2
__UpperCAmelCase : List[Any] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__UpperCAmelCase : Optional[int] = len(UpperCamelCase_)
# Check attention is always last and order is fine
__UpperCAmelCase : str = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : List[str] = model_class(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
self.assertEqual(out_len + 1 , len(UpperCamelCase_))
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
# verify the first attentions (first block, first layer)
__UpperCAmelCase : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
__UpperCAmelCase : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def a_ ( self : Union[str, Any]):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]):
__UpperCAmelCase : Tuple = model_class(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : int = outputs.hidden_states
__UpperCAmelCase : Any = self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
if not self.model_tester.is_training:
return
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase_):
continue
__UpperCAmelCase : List[Any] = model_class(UpperCamelCase_)
model.to(UpperCamelCase_)
model.train()
__UpperCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_)
__UpperCAmelCase : Tuple = model(**UpperCamelCase_).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def a_ ( self : List[str]):
"""simple docstring"""
pass
@slow
def a_ ( self : int):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[Any] = SegformerModel.from_pretrained(UpperCamelCase_)
self.assertIsNotNone(UpperCamelCase_)
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class a__ ( unittest.TestCase ):
@slow
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_)
__UpperCAmelCase : List[str] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCamelCase_)
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : Optional[int] = encoded_inputs.pixel_values.to(UpperCamelCase_)
with torch.no_grad():
__UpperCAmelCase : Tuple = model(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : List[str] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]).to(UpperCamelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4))
@slow
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_)
__UpperCAmelCase : Dict = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : Optional[Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : int = encoded_inputs.pixel_values.to(UpperCamelCase_)
with torch.no_grad():
__UpperCAmelCase : Tuple = model(UpperCamelCase_)
__UpperCAmelCase : str = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : Optional[int] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]).to(UpperCamelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-1))
@slow
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCamelCase_)
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : Any = image_processor(images=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(UpperCamelCase_)
with torch.no_grad():
__UpperCAmelCase : Tuple = model(UpperCamelCase_)
__UpperCAmelCase : Any = outputs.logits.detach().cpu()
__UpperCAmelCase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(500, 300)])
__UpperCAmelCase : Tuple = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , UpperCamelCase_)
__UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_)
__UpperCAmelCase : str = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , UpperCamelCase_)
| 487 | 0 |
import math
def _lowerCAmelCase ( A__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( A__ = 10_001 ):
try:
lowercase__ = int(UpperCamelCase_ )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
lowercase__ = []
lowercase__ = 2
while len(UpperCamelCase_ ) < nth:
if is_prime(UpperCamelCase_ ):
primes.append(UpperCamelCase_ )
num += 1
else:
num += 1
return primes[len(UpperCamelCase_ ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 622 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _lowerCAmelCase ( UpperCamelCase_ ):
return np.dot(UpperCamelCase_ , UpperCamelCase_ )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , *,
lowerCAmelCase__ = np.inf , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = 0.0 , ):
__SCREAMING_SNAKE_CASE = regularization
__SCREAMING_SNAKE_CASE = gamma
if kernel == "linear":
__SCREAMING_SNAKE_CASE = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""")
if not isinstance(self.gamma , (float, int)):
raise ValueError("""gamma must be float or int""")
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""")
__SCREAMING_SNAKE_CASE = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__SCREAMING_SNAKE_CASE = f"Unknown kernel: {kernel}"
raise ValueError(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
return np.dot(lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = observations
__SCREAMING_SNAKE_CASE = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__SCREAMING_SNAKE_CASE) ,) = np.shape(lowerCAmelCase__)
def to_minimize(lowerCAmelCase__) -> float:
__SCREAMING_SNAKE_CASE = 0
((__SCREAMING_SNAKE_CASE) ,) = np.shape(lowerCAmelCase__)
for i in range(lowerCAmelCase__):
for j in range(lowerCAmelCase__):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = LinearConstraint(lowerCAmelCase__ , 0 , 0)
__SCREAMING_SNAKE_CASE = Bounds(0 , self.regularization)
__SCREAMING_SNAKE_CASE = minimize(
lowerCAmelCase__ , np.ones(lowerCAmelCase__) , bounds=lowerCAmelCase__ , constraints=[ly_contraint]).x
__SCREAMING_SNAKE_CASE = l_star
# calculating mean offset of separation plane to points
__SCREAMING_SNAKE_CASE = 0
for i in range(lowerCAmelCase__):
for j in range(lowerCAmelCase__):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
__SCREAMING_SNAKE_CASE = s / n
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowerCAmelCase__)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 | 0 |
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__UpperCamelCase , self ).__init__()
A = AutoModel.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
A = torch.nn.CosineSimilarity(3 , 1e-08 )
A = torch.nn.Softmax(dim=1 )
def __UpperCamelCase ( self : Optional[Any] , **__UpperCamelCase : List[str] ) -> Optional[int]:
return self.bert(**__UpperCamelCase ).last_hidden_state
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Dict ) -> Dict:
return token_embeddings.sum(2 , keepdim=__UpperCamelCase )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=1 ) -> Optional[int]:
return self.softmax(T * self.cos(__UpperCamelCase , __UpperCamelCase ) )
def __UpperCamelCase ( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] ) -> Union[str, Any]:
A = W_supports['sizes'].tolist()
A = W_supports['start_token_id'].item()
A = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
A = self.BERT(**__UpperCamelCase )
A = self.BERT(**__UpperCamelCase )
A = None
A = None
A = W_supports['input_ids'] == start_token_id
A = W_supports['input_ids'] == end_token_id
for i, size in enumerate(__UpperCamelCase ):
if i == 0:
A = 0
else:
A = support_sizes[i - 1]
A = S[s : s + size][start_token_masks[s : s + size]]
A = S[s : s + size][end_token_masks[s : s + size]]
A = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
A = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
A = torch.vstack((p_starts, p_start) )
A = torch.vstack((p_ends, p_end) )
else:
A = p_start
A = p_end
return p_starts, p_ends | 224 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__snake_case :Tuple ='src/transformers'
__snake_case :Dict ='docs/source/en'
__snake_case :Dict ='.'
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Find the start prompt.
A = 0
while not lines[start_index].startswith(lowerCAmelCase__ ):
start_index += 1
start_index += 1
A = start_index
while not lines[end_index].startswith(lowerCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case :List[Any] ='Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__snake_case :List[Any] =re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__snake_case :List[str] =re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__snake_case :Tuple =re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
__snake_case :int =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
A = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCAmelCase__ )
return [m.group(0 ) for m in matches]
def lowerCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
A = 2 if text == '✅' or text == '❌' else len(lowerCAmelCase__ )
A = (width - text_length) // 2
A = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
A = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
A = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCAmelCase__ ):
A = None
if attr_name.endswith('Tokenizer' ):
A = slow_tokenizers
A = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
A = fast_tokenizers
A = attr_name[:-13]
elif _re_tf_models.match(lowerCAmelCase__ ) is not None:
A = tf_models
A = _re_tf_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase__ ) is not None:
A = flax_models
A = _re_flax_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase__ ) is not None:
A = pt_models
A = _re_pt_models.match(lowerCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
A = True
break
# Try again after removing the last word in the name
A = ''.join(camel_case_split(lowerCAmelCase__ )[:-1] )
# Let's build that table!
A = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
A = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
A = [len(lowerCAmelCase__ ) + 2 for c in columns]
A = max([len(lowerCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
A = '|' + '|'.join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for c, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
A = {True: '✅', False: '❌'}
for name in model_names:
A = model_name_to_prefix[name]
A = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for l, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + "|\n"
return table
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple=False ) -> List[str]:
'''simple docstring'''
A , A , A , A = _find_text_in_file(
filename=os.path.join(lowerCAmelCase__ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
A = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCAmelCase__ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
__snake_case :List[Any] =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case :List[Any] =parser.parse_args()
check_model_table(args.fix_and_overwrite) | 224 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = "data2vec-vision"
def __init__( self, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=[3, 5, 7, 11], SCREAMING_SNAKE_CASE_=[1, 2, 3, 6], SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=0.4, SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=255, **SCREAMING_SNAKE_CASE_, ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : List[Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : Union[str, Any] = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : List[Any] = image_size
UpperCamelCase : int = patch_size
UpperCamelCase : Tuple = num_channels
UpperCamelCase : str = use_mask_token
UpperCamelCase : Union[str, Any] = use_absolute_position_embeddings
UpperCamelCase : int = use_relative_position_bias
UpperCamelCase : Optional[int] = use_shared_relative_position_bias
UpperCamelCase : int = layer_scale_init_value
UpperCamelCase : List[Any] = drop_path_rate
UpperCamelCase : str = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCamelCase : List[str] = out_indices
UpperCamelCase : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCamelCase : List[str] = use_auxiliary_head
UpperCamelCase : Any = auxiliary_loss_weight
UpperCamelCase : Any = auxiliary_channels
UpperCamelCase : Tuple = auxiliary_num_convs
UpperCamelCase : str = auxiliary_concat_input
UpperCamelCase : Optional[int] = semantic_loss_ignore_index
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self ) -> float:
return 1e-4
| 40 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__A : Tuple = 10
def UpperCamelCase_ ( A__ : int , A__ : int , A__ : list[int] , A__ : int ):
'''simple docstring'''
for i in range(A__ , A__ ):
if array[i] == target:
return i
return -1
def UpperCamelCase_ ( A__ : list[int] , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Union[str, Any] = len(A__ )
while left <= right:
if right - left < precision:
return lin_search(A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Dict = (left + right) // 3 + 1
lowerCAmelCase_ : List[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : List[str] = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : Tuple = two_third + 1
else:
lowerCAmelCase_ : Tuple = one_third + 1
lowerCAmelCase_ : Any = two_third - 1
else:
return -1
def UpperCamelCase_ ( A__ : int , A__ : int , A__ : list[int] , A__ : int ):
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(A__ , A__ , A__ , A__ )
lowerCAmelCase_ : Dict = (left + right) // 3 + 1
lowerCAmelCase_ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(A__ , one_third - 1 , A__ , A__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , A__ , A__ , A__ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , A__ , A__ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[Any] = input("Enter numbers separated by comma:\n").strip()
__A : Union[str, Any] = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
__A : List[Any] = int(input("Enter the number to be found in the list:\n").strip())
__A : str = ite_ternary_search(collection, target)
__A : Dict = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print("Not found")
| 275 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ = BartphoTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"]
_lowerCAmelCase : Dict = dict(zip(snake_case_, range(len(snake_case_))))
_lowerCAmelCase : Tuple = {"unk_token": "<unk>"}
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n")
_lowerCAmelCase : Dict = BartphoTokenizer(snake_case_, self.monolingual_vocab_file, **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname, **snake_case_)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "This is a là test"
_lowerCAmelCase : Optional[int] = "This is a<unk><unk> test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BartphoTokenizer(snake_case_, self.monolingual_vocab_file, **self.special_tokens_map)
_lowerCAmelCase : Optional[int] = "This is a là test"
_lowerCAmelCase : Optional[Any] = "▁This ▁is ▁a ▁l à ▁t est".split()
_lowerCAmelCase : Dict = tokenizer.tokenize(snake_case_)
self.assertListEqual(snake_case_, snake_case_)
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Optional[int] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_), snake_case_)
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 42 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 1 |
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
SCREAMING_SNAKE_CASE__ = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
SCREAMING_SNAKE_CASE__ = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
SCREAMING_SNAKE_CASE__ = subset[i - 1][j]
if arr[i - 1] <= j:
SCREAMING_SNAKE_CASE__ = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 616 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Dict = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : int = 'unispeech'
def __init__( self : Union[str, Any] , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : Union[str, Any]=7_6_8 , __UpperCAmelCase : Tuple=1_2 , __UpperCAmelCase : Dict=1_2 , __UpperCAmelCase : Optional[Any]=3_0_7_2 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : Union[str, Any]=1e-5 , __UpperCAmelCase : List[Any]="group" , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCAmelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase : int=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=1_2_8 , __UpperCAmelCase : str=1_6 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Union[str, Any]=0.05 , __UpperCAmelCase : str=1_0 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : Tuple=1_0 , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : Tuple=3_2_0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Any=1_0_0 , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=2_5_6 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : List[str]="mean" , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=8_0 , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : int=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=0.5 , **__UpperCAmelCase : List[str] , ) -> Tuple:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = feat_extract_norm
SCREAMING_SNAKE_CASE__ = feat_extract_activation
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = conv_bias
SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = feat_proj_dropout
SCREAMING_SNAKE_CASE__ = final_dropout
SCREAMING_SNAKE_CASE__ = layerdrop
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_ctc_classes
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = do_stable_layer_norm
SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ = apply_spec_augment
SCREAMING_SNAKE_CASE__ = mask_time_prob
SCREAMING_SNAKE_CASE__ = mask_time_length
SCREAMING_SNAKE_CASE__ = mask_time_min_masks
SCREAMING_SNAKE_CASE__ = mask_feature_prob
SCREAMING_SNAKE_CASE__ = mask_feature_length
SCREAMING_SNAKE_CASE__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ = num_codevectors_per_group
SCREAMING_SNAKE_CASE__ = num_codevector_groups
SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE__ = num_negatives
SCREAMING_SNAKE_CASE__ = codevector_dim
SCREAMING_SNAKE_CASE__ = proj_codevector_dim
SCREAMING_SNAKE_CASE__ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE__ = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 616 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
a = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
self.check_model_type(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ , lowercase_ = {}, {}
if padding is not None:
lowercase_ = padding
if truncation is not None:
lowercase_ = truncation
if top_k is not None:
lowercase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , UpperCamelCase__ : Union["Image.Image", str] , UpperCamelCase__ : str = None , **UpperCamelCase__ : Any ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , (Image.Image, str) ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = {"""image""": image, """question""": question}
else:
lowercase_ = image
lowercase_ = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
return results
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
lowercase_ = load_image(inputs["""image"""] )
lowercase_ = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )
lowercase_ = self.image_processor(images=UpperCamelCase__ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase__ )
return model_inputs
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = self.model(**UpperCamelCase__ )
return model_outputs
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowercase_ = self.model.config.num_labels
if self.framework == "pt":
lowercase_ = model_outputs.logits.sigmoid()[0]
lowercase_ , lowercase_ = probs.topk(UpperCamelCase__ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowercase_ = scores.tolist()
lowercase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 412 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
class UpperCamelCase__ :
def __init__( self : Any , *,
UpperCamelCase__ : float = np.inf , UpperCamelCase__ : str = "linear" , UpperCamelCase__ : float = 0.0 , ):
'''simple docstring'''
lowercase_ = regularization
lowercase_ = gamma
if kernel == "linear":
lowercase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
lowercase_ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowercase_ = F'''Unknown kernel: {kernel}'''
raise ValueError(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ndarray , UpperCamelCase__ : ndarray ):
'''simple docstring'''
return np.dot(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : ndarray , UpperCamelCase__ : ndarray ):
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : list[ndarray] , UpperCamelCase__ : ndarray ):
'''simple docstring'''
lowercase_ = observations
lowercase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowercase_) , ) = np.shape(UpperCamelCase__ )
def to_minimize(UpperCamelCase__ : ndarray ) -> float:
lowercase_ = 0
((lowercase_) , ) = np.shape(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(UpperCamelCase__ )
lowercase_ = LinearConstraint(UpperCamelCase__ , 0 , 0 )
lowercase_ = Bounds(0 , self.regularization )
lowercase_ = minimize(
UpperCamelCase__ , np.ones(UpperCamelCase__ ) , bounds=UpperCamelCase__ , constraints=[ly_contraint] ).x
lowercase_ = l_star
# calculating mean offset of separation plane to points
lowercase_ = 0
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
lowercase_ = s / n
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : ndarray ):
'''simple docstring'''
lowercase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412 | 1 |
import collections
import os
import re
from pathlib import Path
__magic_name__ = '''src/transformers'''
# Matches is_xxx_available()
__magic_name__ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__magic_name__ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__magic_name__ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__magic_name__ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__magic_name__ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__magic_name__ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__magic_name__ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__magic_name__ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__magic_name__ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__magic_name__ = re.compile(r'''^\s*try:''')
# Catches a line with else:
__magic_name__ = re.compile(r'''^\s*else:''')
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
if _re_test_backend.search(__lowerCAmelCase ) is None:
return None
snake_case__ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )]
backends.sort()
return "_and_".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
snake_case__ = f.readlines()
snake_case__ = 0
while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
snake_case__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCAmelCase ):
snake_case__ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0]
snake_case__ = re.findall(R"\[([^\]]+)\]" , __lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
snake_case__ = _re_import_struct_key_value.search(__lowerCAmelCase )
if single_line_import_search is not None:
snake_case__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
snake_case__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
snake_case__ = lines[line_index]
if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None:
snake_case__ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(", " )
snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_between_brackets.search(__lowerCAmelCase ) is not None:
snake_case__ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(", " )
snake_case__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_quote_object.search(__lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
snake_case__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case__ = []
while (
line_index < len(__lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
snake_case__ = lines[line_index]
snake_case__ = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
snake_case__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
snake_case__ = lines[line_index]
snake_case__ = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
snake_case__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
def find_duplicates(__lowerCAmelCase ):
return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
snake_case__ = []
for key in import_dict_objects.keys():
snake_case__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
snake_case__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
snake_case__ = "base imports" if key == "none" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = []
for root, _, files in os.walk(__lowerCAmelCase ):
if "__init__.py" in files:
snake_case__ = os.path.join(__lowerCAmelCase , "__init__.py" )
snake_case__ = parse_init(__lowerCAmelCase )
if objects is not None:
snake_case__ = analyze_results(*__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > 0:
raise ValueError("\n\n".join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = []
for path, directories, files in os.walk(__lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__lowerCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
snake_case__ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) )
snake_case__ = short_path.replace(os.path.sep , "." )
submodules.append(__lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
snake_case__ = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) )
snake_case__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__lowerCAmelCase )
return submodules
__magic_name__ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def SCREAMING_SNAKE_CASE__ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
snake_case__ = direct_transformers_import(__lowerCAmelCase )
snake_case__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(__lowerCAmelCase , "__init__.py" ) , "r" ) as f:
snake_case__ = f.read()
import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , __lowerCAmelCase ) ) )
snake_case__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__lowerCAmelCase ) > 0:
snake_case__ = "\n".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 700 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
if len(__lowerCAmelCase ) == 0:
return False
snake_case__ = len(__lowerCAmelCase ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , __lowerCAmelCase )
else:
return binary_search(a_list[midpoint + 1 :] , __lowerCAmelCase )
if __name__ == "__main__":
__magic_name__ = input('''Enter numbers separated by comma:\n''').strip()
__magic_name__ = [int(item.strip()) for item in user_input.split(''',''')]
__magic_name__ = int(input('''Enter the number to be found in the list:\n''').strip())
__magic_name__ = '''''' if binary_search(sequence, target) else '''not '''
print(F'''{target} was {not_str}found in {sequence}''')
| 530 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_a: int = logging.get_logger(__name__)
_a: int = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ = '''trajectory_transformer'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Dict , lowerCAmelCase : int=100 , lowerCAmelCase : str=5 , lowerCAmelCase : Dict=1 , lowerCAmelCase : int=1 , lowerCAmelCase : Any=249 , lowerCAmelCase : str=6 , lowerCAmelCase : int=17 , lowerCAmelCase : Any=25 , lowerCAmelCase : str=4 , lowerCAmelCase : int=4 , lowerCAmelCase : str=128 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[str]=0.0_006 , lowerCAmelCase : Tuple=512 , lowerCAmelCase : int=0.02 , lowerCAmelCase : Optional[Any]=1e-12 , lowerCAmelCase : int=1 , lowerCAmelCase : str=True , lowerCAmelCase : int=1 , lowerCAmelCase : str=50_256 , lowerCAmelCase : Optional[int]=50_256 , **lowerCAmelCase : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = action_weight
UpperCAmelCase_ = reward_weight
UpperCAmelCase_ = value_weight
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = block_size
UpperCAmelCase_ = action_dim
UpperCAmelCase_ = observation_dim
UpperCAmelCase_ = transition_dim
UpperCAmelCase_ = learning_rate
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = n_embd
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = attn_pdrop
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = kaiming_initializer_range
UpperCAmelCase_ = use_cache
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) | 162 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _UpperCAmelCase ( __lowerCAmelCase ):
raise NotImplementedError()
@abstractmethod
def _UpperCAmelCase ( self ):
raise NotImplementedError()
| 208 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __A :
def __init__( self , a__ , a__=13 , a__=7 , a__=6 , a__=17 , a__=23 , a__=11 , a__=True , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : Optional[int] = act_dim
_lowerCAmelCase : Any = state_dim
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = max_length
_lowerCAmelCase : int = is_training
def __A ( self ):
_lowerCAmelCase : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_lowerCAmelCase : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_lowerCAmelCase : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCAmelCase : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCAmelCase : Any = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
_lowerCAmelCase : List[str] = random_attention_mask((self.batch_size, self.seq_length) )
_lowerCAmelCase : str = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __A ( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Any = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def __A ( self ):
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Optional[Any] = config_and_inputs
_lowerCAmelCase : Dict = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class __A ( __a , __a , __a , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
_UpperCamelCase : List[str] = ()
_UpperCamelCase : Optional[int] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_UpperCamelCase : Optional[int] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : str = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : List[str] = False
def __A ( self ):
_lowerCAmelCase : Any = DecisionTransformerModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __A ( self ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case__ )
_lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : str = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : int = 2 # number of steps of autoregressive prediction we will perform
_lowerCAmelCase : str = 10 # defined by the RL environment, may be normalized
_lowerCAmelCase : str = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
_lowerCAmelCase : Dict = model.to(snake_case__ )
_lowerCAmelCase : str = model.config
torch.manual_seed(0 )
_lowerCAmelCase : Any = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
_lowerCAmelCase : List[Any] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=snake_case__ )
_lowerCAmelCase : List[Any] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_lowerCAmelCase : int = state
_lowerCAmelCase : List[str] = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
_lowerCAmelCase : Dict = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
_lowerCAmelCase : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
_lowerCAmelCase : Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
_lowerCAmelCase : Union[str, Any] = action_pred[0, -1]
_lowerCAmelCase : List[Any] = torch.cat([states, state] , dim=1 )
_lowerCAmelCase : List[str] = returns_to_go[0, -1] - reward
_lowerCAmelCase : Tuple = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_lowerCAmelCase : Any = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 706 | """simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class lowerCAmelCase :
def __init__( self : List[str] , __lowercase : str=None , **__lowercase : Dict ):
"""simple docstring"""
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
__lowercase =model
__lowercase =kwargs.get('model_save_dir' , __lowercase )
__lowercase =kwargs.get('latest_model_name' , __lowercase )
def __call__( self : List[str] , **__lowercase : int ):
"""simple docstring"""
__lowercase ={k: np.array(__lowercase ) for k, v in kwargs.items()}
return self.model.run(__lowercase , __lowercase )
@staticmethod
def snake_case ( __lowercase : Union[str, Path] , __lowercase : List[Any]=None , __lowercase : Optional[Any]=None ):
"""simple docstring"""
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
__lowercase ='CPUExecutionProvider'
return ort.InferenceSession(__lowercase , providers=[provider] , sess_options=__lowercase )
def snake_case ( self : Dict , __lowercase : Union[str, Path] , __lowercase : Optional[str] = None , **__lowercase : List[Any] ):
"""simple docstring"""
__lowercase =file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowercase =self.model_save_dir.joinpath(self.latest_model_name )
__lowercase =Path(__lowercase ).joinpath(__lowercase )
try:
shutil.copyfile(__lowercase , __lowercase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowercase =self.model_save_dir.joinpath(__lowercase )
if src_path.exists():
__lowercase =Path(__lowercase ).joinpath(__lowercase )
try:
shutil.copyfile(__lowercase , __lowercase )
except shutil.SameFileError:
pass
def snake_case ( self : List[Any] , __lowercase : Union[str, os.PathLike] , **__lowercase : Any , ):
"""simple docstring"""
if os.path.isfile(__lowercase ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(__lowercase , exist_ok=__lowercase )
# saving model weights/files
self._save_pretrained(__lowercase , **__lowercase )
@classmethod
def snake_case ( cls : Any , __lowercase : Union[str, Path] , __lowercase : Optional[Union[bool, str, None]] = None , __lowercase : Optional[Union[str, None]] = None , __lowercase : bool = False , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional["ort.SessionOptions"] = None , **__lowercase : Tuple , ):
"""simple docstring"""
__lowercase =file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowercase ):
__lowercase =OnnxRuntimeModel.load_model(
os.path.join(__lowercase , __lowercase ) , provider=__lowercase , sess_options=__lowercase )
__lowercase =Path(__lowercase )
# load model from hub
else:
# download model
__lowercase =hf_hub_download(
repo_id=__lowercase , filename=__lowercase , use_auth_token=__lowercase , revision=__lowercase , cache_dir=__lowercase , force_download=__lowercase , )
__lowercase =Path(__lowercase ).parent
__lowercase =Path(__lowercase ).name
__lowercase =OnnxRuntimeModel.load_model(__lowercase , provider=__lowercase , sess_options=__lowercase )
return cls(model=__lowercase , **__lowercase )
@classmethod
def snake_case ( cls : int , __lowercase : Union[str, Path] , __lowercase : bool = True , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
__lowercase =None
if len(str(__lowercase ).split('@' ) ) == 2:
__lowercase , __lowercase =model_id.split('@' )
return cls._from_pretrained(
model_id=__lowercase , revision=__lowercase , cache_dir=__lowercase , force_download=__lowercase , use_auth_token=__lowercase , **__lowercase , )
| 119 |
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : List[str], lowercase__ : Tuple ):
'''simple docstring'''
__lowercase =[0 for i in range(r + 1 )]
# nc0 = 1
__lowercase =1
for i in range(1, n + 1 ):
# to compute current row from previous row.
__lowercase =min(lowercase__, lowercase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 119 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 587 | import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
_lowerCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_lowerCAmelCase : List[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id="test-config" , push_to_hub=_snake_case , use_auth_token=self._token )
_lowerCAmelCase : Union[str, Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_lowerCAmelCase : Tuple = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id="valid_org/test-config-org" , push_to_hub=_snake_case , use_auth_token=self._token )
_lowerCAmelCase : Any = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self ):
CustomConfig.register_for_auto_class()
_lowerCAmelCase : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_lowerCAmelCase : Any = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class __A ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase : Tuple = c.n_embd + 1 # int
_lowerCAmelCase : Dict = c.resid_pdrop + 1.0 # float
_lowerCAmelCase : Dict = not c.scale_attn_weights # bool
_lowerCAmelCase : int = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_snake_case , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_snake_case , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_snake_case , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_snake_case , c.summary_type , "mismatch for key: summary_type" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = PretrainedConfig()
_lowerCAmelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_snake_case , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_lowerCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case )]
if len(_snake_case ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(_snake_case )}.""" )
def SCREAMING_SNAKE_CASE__ ( self ):
with self.assertRaises(_snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase : Tuple = mock.Mock()
_lowerCAmelCase : Any = 500
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Any = HTTPError
_lowerCAmelCase : List[str] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_snake_case ) as mock_head:
_lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self ):
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase : Any = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = AutoConfig.from_pretrained("bert-base-cased" )
_lowerCAmelCase : Tuple = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case )
_lowerCAmelCase : Optional[int] = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase : int = AutoConfig.from_pretrained(_snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase : List[Any] = ["config.42.0.0.json"]
_lowerCAmelCase : str = 768
configuration.save_pretrained(_snake_case )
shutil.move(os.path.join(_snake_case , "config.4.0.0.json" ) , os.path.join(_snake_case , "config.42.0.0.json" ) )
_lowerCAmelCase : str = AutoConfig.from_pretrained(_snake_case )
self.assertEqual(new_configuration.hidden_size , 768 )
def SCREAMING_SNAKE_CASE__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_lowerCAmelCase : str = "v4.0.0"
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase : Union[str, Any] = "v3.0.0"
_lowerCAmelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case )
self.assertEqual(old_configuration.hidden_size , 768 )
| 587 | 1 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Any = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
_UpperCamelCase : Union[str, Any] = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = model.state_dict()
def to_tf_var_name(UpperCAmelCase_ ):
for patt, repl in iter(UpperCAmelCase_ ):
_UpperCamelCase : int = name.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return f'bert/{name}'
def create_tf_var(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_UpperCamelCase : Union[str, Any] = tf.get_variable(dtype=UpperCAmelCase_ , shape=tensor.shape , name=UpperCAmelCase_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(UpperCAmelCase_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_UpperCamelCase : Optional[int] = to_tf_var_name(UpperCAmelCase_ )
_UpperCamelCase : str = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_UpperCamelCase : Optional[int] = torch_tensor.T
_UpperCamelCase : Optional[Any] = create_tf_var(tensor=UpperCAmelCase_ , name=UpperCAmelCase_ , session=UpperCAmelCase_ )
tf.keras.backend.set_value(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Any = session.run(UpperCAmelCase_ )
print(f'Successfully created {tf_name}: {np.allclose(UpperCAmelCase_ , UpperCAmelCase_ )}' )
_UpperCamelCase : Optional[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def A__ ( UpperCAmelCase_=None ):
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Directory in which to save tensorflow model' )
_UpperCamelCase : Optional[int] = parser.parse_args(UpperCAmelCase_ )
_UpperCamelCase : Any = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=UpperCAmelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 195 |
'''simple docstring'''
from pathlib import Path
import fire
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : int = Path(UpperCAmelCase_ )
_UpperCamelCase : str = Path(UpperCAmelCase_ )
dest_dir.mkdir(exist_ok=UpperCAmelCase_ )
for path in src_dir.iterdir():
_UpperCamelCase : int = [x.rstrip() for x in list(path.open().readlines() )][:n]
_UpperCamelCase : Any = dest_dir.joinpath(path.name )
print(UpperCAmelCase_ )
dest_path.open('w' ).write('\n'.join(UpperCAmelCase_ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 195 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__lowerCamelCase : List[str] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a__ ( nn.Module ):
def __init__( self : Tuple,_A : str ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = torchvision.models.resnetaaa(pretrained=__A )
SCREAMING_SNAKE_CASE_ : Any = list(model.children() )[:-2]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Sequential(*__A )
SCREAMING_SNAKE_CASE_ : Dict = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __UpperCamelCase ( self : Tuple,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.pool(self.model(__A ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.flatten(__A,start_dim=2 )
SCREAMING_SNAKE_CASE_ : Any = out.transpose(1,2 ).contiguous()
return out # BxNx2048
class a__ ( __lowercase ):
def __init__( self : Optional[Any],_A : List[Any],_A : Union[str, Any],_A : List[str],_A : List[Any],_A : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [json.loads(__A ) for l in open(__A )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.dirname(__A )
SCREAMING_SNAKE_CASE_ : int = tokenizer
SCREAMING_SNAKE_CASE_ : Dict = labels
SCREAMING_SNAKE_CASE_ : int = len(__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_seq_length
SCREAMING_SNAKE_CASE_ : Optional[int] = transforms
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.data )
def __getitem__( self : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"],add_special_tokens=__A ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = sentence[0], sentence[1:-1], sentence[-1]
SCREAMING_SNAKE_CASE_ : str = sentence[: self.max_seq_length]
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros(self.n_classes )
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : Tuple = Image.open(os.path.join(self.data_dir,self.data[index]["img"] ) ).convert("RGB" )
SCREAMING_SNAKE_CASE_ : int = self.transforms(__A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [len(row["sentence"] ) for row in batch]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = len(a__ ), max(a__ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros(a__ , a__ , dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Any = torch.zeros(a__ , a__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a__ , a__ ) ):
SCREAMING_SNAKE_CASE_ : str = input_row["sentence"]
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : List[str] = torch.stack([row["image"] for row in batch] )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.stack([row["label"] for row in batch] )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.stack([row["image_start_token"] for row in batch] )
SCREAMING_SNAKE_CASE_ : List[str] = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _snake_case ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _snake_case ( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 714 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCamelCase : Optional[int] = '''pt'''
elif is_tf_available():
__lowerCamelCase : str = '''tf'''
else:
__lowerCamelCase : int = '''jax'''
class a__ ( A__ , unittest.TestCase ):
A = PerceiverTokenizer
A = False
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def __UpperCamelCase ( self : Optional[int],**_A : List[Any] ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : List[Any],_A : Optional[Any],_A : str=False,_A : Tuple=20,_A : Tuple=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode([i],clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ : int = list(filter(lambda _A : re.match(R"^[ a-zA-Z]+$",t[1] ),_A ) )
SCREAMING_SNAKE_CASE_ : Dict = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1],add_special_tokens=_A ),_A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ : List[str] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ : List[Any] = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_A,clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ : Dict = (
tokenizer.decode([toks_ids[0]],clean_up_tokenization_spaces=_A )
+ " "
+ tokenizer.decode(toks_ids[1:],clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ : str = " " + output_txt
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(_A,add_special_tokens=_A )
return output_txt, output_ids
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "Unicode €."
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_A )
SCREAMING_SNAKE_CASE_ : int = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"],_A )
# decoding
SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_A )
self.assertEqual(_A,"[CLS]Unicode €.[SEP]" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE_ : Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"],_A )
# decoding
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(_A )
self.assertEqual(_A,"[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ),"[CLS]e è é ê ë[SEP]" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ : str = tokenizer(_A,padding=_A,return_tensors=_A )
self.assertIsInstance(_A,_A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A,_A )
self.assertEqual((2, 38),batch.input_ids.shape )
self.assertEqual((2, 38),batch.attention_mask.shape )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : Any = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(_A,padding=_A,return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids",_A )
self.assertIn("attention_mask",_A )
self.assertNotIn("decoder_input_ids",_A )
self.assertNotIn("decoder_attention_mask",_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : int = [
"Summary of the text.",
"Another summary.",
]
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(
text_target=_A,max_length=32,padding="max_length",truncation=_A,return_tensors=_A )
self.assertEqual(32,targets["input_ids"].shape[1] )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length,42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : str = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(_A,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : int = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = after_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Tuple = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode(_A,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = after_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
self.assertIn("new_additional_special_token",after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length,42 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.__class__.from_pretrained(_A,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length,43 )
shutil.rmtree(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A,"special_tokens_map.json" ),encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(_A )
with open(os.path.join(_A,"tokenizer_config.json" ),encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ : int = json.load(_A )
SCREAMING_SNAKE_CASE_ : Any = [F'<extra_id_{i}>' for i in range(125 )]
SCREAMING_SNAKE_CASE_ : List[Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_A,"special_tokens_map.json" ),"w",encoding="utf-8" ) as outfile:
json.dump(_A,_A )
with open(os.path.join(_A,"tokenizer_config.json" ),"w",encoding="utf-8" ) as outfile:
json.dump(_A,_A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_class.from_pretrained(
_A,)
self.assertIn(
"an_additional_special_token",tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"],tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ),)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token",lstrip=_A )]
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_class.from_pretrained(
_A,additional_special_tokens=_A,)
self.assertIn("a_new_additional_special_token",tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"],tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ),"�" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizers(fast=_A,do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ : str = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A,_A )
| 316 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a)
@torch.no_grad()
def __call__( self , __a = 1 , __a = 1_00 , __a = None , __a = None , __a = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
_UpperCamelCase = self.unet.config.sample_size / self.unet.config.sample_rate
_UpperCamelCase = audio_length_in_s * self.unet.config.sample_rate
_UpperCamelCase = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''')
_UpperCamelCase = int(__a)
if sample_size % down_scale_factor != 0:
_UpperCamelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''')
_UpperCamelCase = int(__a)
_UpperCamelCase = next(iter(self.unet.parameters())).dtype
_UpperCamelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__a)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_UpperCamelCase = randn_tensor(__a , generator=__a , device=self.device , dtype=__a)
# set step values
self.scheduler.set_timesteps(__a , device=audio.device)
_UpperCamelCase = self.scheduler.timesteps.to(__a)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
_UpperCamelCase = self.unet(__a , __a).sample
# 2. compute previous image: x_t -> t_t-1
_UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample
_UpperCamelCase = audio.clamp(-1 , 1).float().cpu().numpy()
_UpperCamelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__a)
| 19 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = "big_bird"
def __init__( self : Tuple , A : Tuple=5_03_58 , A : Any=7_68 , A : Union[str, Any]=12 , A : List[str]=12 , A : Dict=30_72 , A : int="gelu_new" , A : Optional[int]=0.1 , A : Optional[int]=0.1 , A : Dict=40_96 , A : Tuple=2 , A : Union[str, Any]=0.02 , A : str=1e-12 , A : Optional[Any]=True , A : Union[str, Any]=0 , A : Optional[int]=1 , A : Optional[int]=2 , A : Any=66 , A : List[Any]="block_sparse" , A : List[Any]=True , A : Union[str, Any]=False , A : Optional[Any]=64 , A : Optional[Any]=3 , A : Tuple=None , **A : Union[str, Any] , ) -> Tuple:
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , sep_token_id=A , **A , )
lowercase_ : Tuple = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : int = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[str] = type_vocab_size
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : int = use_cache
lowercase_ : int = rescale_embeddings
lowercase_ : Optional[Any] = attention_type
lowercase_ : str = use_bias
lowercase_ : Dict = block_size
lowercase_ : str = num_random_blocks
lowercase_ : Optional[Any] = classifier_dropout
class _UpperCAmelCase ( _A ):
@property
def A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase_ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase_ : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 231 | 0 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , lowerCAmelCase__ ) is not None
def _snake_case ( snake_case__ : Optional[int] ):
A = object_name.split('.' )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , F'{module}.py' ) ):
i += 1
if i < len(lowerCAmelCase__ ):
A = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCAmelCase__ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = ''
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
_lowercase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_lowercase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_lowercase = re.compile(r"""<FILL\s+[^>]*>""")
def _snake_case ( snake_case__ : Any ):
A = code.split('\n' )
A = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _snake_case ( snake_case__ : Optional[int] ):
A = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
A = F'class Bla:\n{code}'
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
A = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
A , A = style_docstrings_in_code(lowerCAmelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def _snake_case ( snake_case__ : str , snake_case__ : str=False ):
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(lowerCAmelCase__ )
A = get_indent(lowerCAmelCase__ )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
A = lines[line_index]
A = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(F'^{indent}# End copy' , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = ''.join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
A = '\n'.join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
A = replace_pattern.replace('with' , '' ).split(',' )
A = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
A = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def _snake_case ( snake_case__ : Optional[int] = False ):
A = glob.glob(os.path.join(lowerCAmelCase__ , '**/*.py' ) , recursive=lowerCAmelCase__ )
A = []
for filename in all_files:
A = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
A = '\n'.join(lowerCAmelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite) | 705 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
a__ = '''bert-base-cased'''
a__ = '''fp16'''
a__ = '''bf16'''
a__ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> List[str]:
super().setUp()
_a : str = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def __lowercase ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_a ):
_a : Union[str, Any] = self.dist_env.copy()
_a : int = F"""{i + 1}"""
_a : Dict = strategy
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __lowercase ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_a ):
_a : Dict = self.dist_env.copy()
_a : Any = prefetch_policy
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __lowercase ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_a ):
_a : List[Any] = self.dist_env.copy()
_a : List[Any] = state_dict_type
with mockenv_context(**_a ):
_a : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = AutoModel.from_pretrained(_a )
for policy in FSDP_AUTO_WRAP_POLICY:
_a : Any = self.dist_env.copy()
_a : str = policy
if policy == "TRANSFORMER_BASED_WRAP":
_a : Optional[Any] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
_a : Optional[int] = '''2000'''
with mockenv_context(**_a ):
_a : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_a : Optional[int] = self.dist_env.copy()
_a : Union[str, Any] = '''TRANSFORMER_BASED_WRAP'''
_a : Optional[Any] = '''T5Layer'''
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
with self.assertRaises(_a ) as cm:
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
_a : int = self.dist_env.copy()
_a : Optional[Any] = '''SIZE_BASED_WRAP'''
_a : Optional[Any] = '''0'''
with mockenv_context(**_a ):
_a : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __lowercase ( self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_a : List[str] = self.dist_env.copy()
_a : Optional[Any] = mp_dtype
with mockenv_context(**_a ):
_a : Tuple = Accelerator()
if mp_dtype == "fp16":
_a : Union[str, Any] = torch.floataa
elif mp_dtype == "bf16":
_a : str = torch.bfloataa
_a : Tuple = MixedPrecision(param_dtype=_a , reduce_dtype=_a , buffer_dtype=_a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_a )
def __lowercase ( self ) -> Optional[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_a : Union[str, Any] = self.dist_env.copy()
_a : Tuple = str(_a ).lower()
with mockenv_context(**_a ):
_a : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_a ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : List[Any] = 0.82
_a : str = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
_a : str = {
'''multi_gpu_fp16''': 3_2_0_0,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_a : Any = 1_6_0
_a : str = 1_6_0
_a : str = inspect.getfile(accelerate.test_utils )
_a : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def __lowercase ( self ) -> List[Any]:
_a : str = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
_a : Union[str, Any] = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
_a : Optional[int] = cmd.copy()
for i, strategy in enumerate(_a ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
def __lowercase ( self ) -> str:
_a : List[Any] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
_a : Dict = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(_a ):
_a : int = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_a : int = len(_a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_a : Dict = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
_a : str = cmd_config[:-1]
_a : Union[str, Any] = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
def __lowercase ( self ) -> int:
_a : Optional[int] = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
_a : Tuple = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_a : int = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(_a ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 1 |
import os
import sys
import transformers
__lowercase : str = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None) | 706 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowercase : List[str] = ["""bert-base-uncased""", """bert-base-cased"""]
__lowercase : Tuple = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowerCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = tokenizer
lowerCamelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = TFAutoModel.from_config(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(UpperCamelCase__ )
lowerCamelCase_ = self.bert(**UpperCamelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = [
BertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCamelCase_ = [TFBertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(UpperCamelCase__ , use_fast_bert_tokenizer=UpperCamelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase_ = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCamelCase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tokenizer(UpperCamelCase__ , return_tensors='''tf''' , padding='''longest''' )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf_tokenizer(self.paired_sentences )
lowerCamelCase_ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.function(UpperCamelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tf.constant(UpperCamelCase__ )
lowerCamelCase_ = compiled_tokenizer(UpperCamelCase__ )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = ModelToSave(tokenizer=UpperCamelCase__ )
lowerCamelCase_ = tf.convert_to_tensor(self.test_sentences )
lowerCamelCase_ = model(UpperCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ = Path(UpperCamelCase__ ) / '''saved.model'''
model.save(UpperCamelCase__ )
lowerCamelCase_ = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase_ = loaded_model(UpperCamelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 ) | 66 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = LxmertTokenizer
_a = LxmertTokenizerFast
_a = True
_a = True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ :Any = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase__ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = '''UNwant\u00E9d,running'''
UpperCamelCase__ :Union[str, Any] = '''unwanted, running'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.tokenizer_class(self.vocab_file )
UpperCamelCase__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase__ :int = '''I was born in 92000, and this is falsé.'''
UpperCamelCase__ :Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
UpperCamelCase__ :str = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :int = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.get_rust_tokenizer()
UpperCamelCase__ :Any = tokenizer.encode(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) | 189 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "speech_to_text_2"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=1_0000 , __SCREAMING_SNAKE_CASE : Tuple=6 , __SCREAMING_SNAKE_CASE : Any=2048 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict="relu" , __SCREAMING_SNAKE_CASE : str=256 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1024 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> str:
a_ : str = vocab_size
a_ : Dict = d_model
a_ : Union[str, Any] = decoder_ffn_dim
a_ : str = decoder_layers
a_ : Dict = decoder_attention_heads
a_ : int = dropout
a_ : int = attention_dropout
a_ : str = activation_dropout
a_ : List[str] = activation_function
a_ : str = init_std
a_ : List[str] = decoder_layerdrop
a_ : Optional[Any] = use_cache
a_ : Union[str, Any] = decoder_layers
a_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
a_ : Optional[int] = max_target_positions
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
| 466 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__lowerCAmelCase = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__lowerCAmelCase = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
__lowerCAmelCase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__lowerCAmelCase = 'allenai'
def _UpperCAmelCase ( __A : Union[str, Any] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
a_ : Union[str, Any] = dict((re.sub(R'''@@$''' , '''''' , __A ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , __A ), v) for k, v in d.items() )
a_ : str = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
a_ : str = d[k] # restore
return da
def _UpperCAmelCase ( __A : List[Any] , __A : List[str] ):
# prep
assert os.path.exists(__A )
os.makedirs(__A , exist_ok=__A )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
a_ : Union[str, Any] = basename(__A )
a_ : Optional[Any] = dirname(__A )
a_ : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
a_ : str = cls.hub_models()
a_ : List[str] = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
a_ : str = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
a_ : Any = hub_utils.from_pretrained(
__A , __A , __A , archive_map=__A , **__A )
a_ : Optional[int] = vars(chkpt['''args''']['''model'''] )
a_ : Any = args['''source_lang''']
a_ : List[Any] = args['''target_lang''']
a_ : Union[str, Any] = dirname(__A )
a_ : int = basename(__A )
# dicts
a_ : Optional[Any] = os.path.join(__A , f'dict.{src_lang}.txt' )
a_ : int = os.path.join(__A , f'dict.{tgt_lang}.txt' )
a_ : Any = Dictionary.load(__A )
a_ : Any = rewrite_dict_keys(src_dict.indices )
a_ : List[Any] = len(__A )
a_ : Optional[Any] = os.path.join(__A , '''vocab-src.json''' )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
a_ : Tuple = True
for k in src_vocab.keys():
if not k.islower():
a_ : Dict = False
break
a_ : Any = Dictionary.load(__A )
a_ : List[Any] = rewrite_dict_keys(tgt_dict.indices )
a_ : int = len(__A )
a_ : Any = os.path.join(__A , '''vocab-tgt.json''' )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# merges_file (bpecodes)
a_ : Optional[int] = os.path.join(__A , VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
a_ : Optional[Any] = os.path.join(__A , __A )
if os.path.exists(__A ):
break
with open(__A , encoding='''utf-8''' ) as fin:
a_ : Dict = fin.read()
a_ : Any = re.sub(R''' \d+$''' , '''''' , __A , 0 , re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(__A , '''w''' , encoding='''utf-8''' ) as fout:
fout.write(__A )
# model config
a_ : List[Any] = os.path.join(__A , '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
a_ : int = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
a_ : List[Any] = 5
a_ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
a_ : Optional[int] = best_score_hparams[model_dir]['''length_penalty''']
else:
a_ : Union[str, Any] = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# tokenizer config
a_ : Dict = os.path.join(__A , __A )
a_ : List[str] = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 10_24,
'''do_lower_case''': do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# model
a_ : Any = chkpt['''models'''][0]
a_ : Optional[int] = model.state_dict()
# rename keys to start with 'model.'
a_ : Tuple = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
a_ : Optional[Any] = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(__A , __A )
a_ : str = FSMTConfig.from_pretrained(__A )
a_ : Optional[int] = FSMTForConditionalGeneration(__A )
# check that it loads ok
model_new.load_state_dict(__A , strict=__A )
# save
a_ : List[str] = os.path.join(__A , __A )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(__A , __A )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 466 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs | 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser | 26 | 1 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase_ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase_ = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
lowerCAmelCase_ = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def A__ ( A : str):
'''simple docstring'''
UpperCamelCase : Optional[Any] = None
# source code of `config_class`
UpperCamelCase : int = inspect.getsource(A)
UpperCamelCase : List[str] = _re_checkpoint.findall(A)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/"):
UpperCamelCase : int = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase : Tuple = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
UpperCamelCase : Tuple = ckpt_name
break
return checkpoint
def A__ ( ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase : str = get_checkpoint_from_config_class(A)
UpperCamelCase : List[str] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(A)
if len(A) > 0:
UpperCamelCase : Any = "\n".join(sorted(A))
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 173 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 1
@register_to_config
def __init__( self , lowerCamelCase = 20_00 , lowerCamelCase = 0.15 , lowerCamelCase = 0.01 , lowerCamelCase = 1348.0 , lowerCamelCase = 1e-5 , lowerCamelCase = 1 , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Tuple = sigma_max
# setable values
UpperCamelCase : Tuple = None
self.set_sigmas(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Any:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase : List[str] = torch.linspace(1 , lowerCamelCase , lowerCamelCase , device=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase : List[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase , lowerCamelCase )
UpperCamelCase : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase : str = torch.exp(torch.linspace(math.log(lowerCamelCase ) , math.log(lowerCamelCase ) , lowerCamelCase ) )
UpperCamelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> str:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
UpperCamelCase : Union[str, Any] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase : Union[str, Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCamelCase : str = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase : List[str] = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase : Union[str, Any] = self.get_adjacent_sigma(lowerCamelCase , lowerCamelCase ).to(sample.device )
UpperCamelCase : str = torch.zeros_like(lowerCamelCase )
UpperCamelCase : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase : Optional[Any] = diffusion.unsqueeze(-1 )
UpperCamelCase : Union[str, Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase : Union[str, Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCamelCase , device=sample.device , dtype=sample.dtype )
UpperCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase : List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase , prev_sample_mean=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase : Dict = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase : Union[str, Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCamelCase : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCamelCase : Optional[Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCamelCase : str = step_size.unsqueeze(-1 )
UpperCamelCase : List[str] = sample + step_size * model_output
UpperCamelCase : Union[str, Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase : int = timesteps.to(original_samples.device )
UpperCamelCase : int = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCamelCase : str = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
UpperCamelCase : Optional[Any] = noise + original_samples
return noisy_samples
def __len__( self ) -> str:
'''simple docstring'''
return self.config.num_train_timesteps
| 173 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] , snake_case : Tuple )-> List[Any]:
# ===== initialization =====
_lowerCamelCase = Mock()
_lowerCamelCase = conn, Mock()
_lowerCamelCase = iter([1, None] )
_lowerCamelCase = lambda snake_case : next(snake_case )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=snake_case )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 701 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = IFInpaintingPipeline
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ):
return self._get_dummy_components()
def snake_case_ ( self , a__ , a__=0 ):
if str(a__ ).startswith('mps' ):
_lowerCamelCase = torch.manual_seed(a__ )
else:
_lowerCamelCase = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case_ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def snake_case_ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ):
self._test_save_load_local()
def snake_case_ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 222 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> List[str]:
if not head:
return True
# split the list to two parts
__snake_case , __snake_case = head.next, head
while fast and fast.next:
__snake_case = fast.next.next
__snake_case = slow.next
__snake_case = slow.next
__snake_case = None # Don't forget here! But forget still works!
# reverse the second part
__snake_case = None
while second:
__snake_case = second.next
__snake_case = node
__snake_case = second
__snake_case = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
__snake_case = node.next
__snake_case = head.next
return True
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Tuple:
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__snake_case = __snake_case = __snake_case = head
while fast and fast.next:
__snake_case , __snake_case = fast.next.next, slow.next
# 2. Push the second half into the stack
__snake_case = [slow.val]
while slow.next:
__snake_case = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__snake_case = cur.next
return True
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if not head or not head.next:
return True
__snake_case = {}
__snake_case = 0
while head:
if head.val in d:
d[head.val].append(_UpperCAmelCase )
else:
__snake_case = [pos]
__snake_case = head.next
pos += 1
__snake_case = pos - 1
__snake_case = 0
for v in d.values():
if len(_UpperCAmelCase ) % 2 != 0:
middle += 1
else:
__snake_case = 0
for i in range(0 , len(_UpperCAmelCase ) ):
if v[i] + v[len(_UpperCAmelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 69 |
'''simple docstring'''
def lowerCamelCase__ ( a ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A (__magic_name__ ):
snake_case :Dict = (PNDMScheduler,)
snake_case :List[Any] = (("num_inference_steps", 50),)
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : int = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**UpperCamelCase_ )
return config
def _snake_case ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
__UpperCAmelCase : Any = dict(self.forward_default_kwargs )
__UpperCAmelCase : Dict = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
__UpperCAmelCase : str = self.dummy_sample
__UpperCAmelCase : Optional[Any] = 0.1 * sample
__UpperCAmelCase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : List[Any] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
__UpperCAmelCase : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
__UpperCAmelCase : int = scheduler_class.from_pretrained(UpperCamelCase_ )
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
__UpperCAmelCase : Any = dummy_past_residuals[:]
__UpperCAmelCase : Any = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Dict = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase : List[str] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : int = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ):
pass
def _snake_case ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
__UpperCAmelCase : str = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
__UpperCAmelCase : int = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : Optional[int] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
__UpperCAmelCase : str = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : str = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase : Any = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : List[str] = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = 10
__UpperCAmelCase : Optional[Any] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
__UpperCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__UpperCAmelCase : Any = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[int] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : str = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Any = self.dummy_sample
__UpperCAmelCase : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , "set_timesteps" ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , "set_timesteps" ):
__UpperCAmelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__UpperCAmelCase : Optional[Any] = dummy_past_residuals[:]
__UpperCAmelCase : Dict = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase : Optional[int] = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Union[str, Any] = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : str = self.get_scheduler_config(steps_offset=1 )
__UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _snake_case ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCamelCase_ )
def _snake_case ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
__UpperCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[Any] = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
def _snake_case ( self ):
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _snake_case ( self ):
__UpperCAmelCase : Any = self.full_loop()
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
__UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : str = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Optional[int] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1 , lowerCamelCase=False , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCamelCase )
__magic_name__ : Any = vocab_size
__magic_name__ : str = d_embed
__magic_name__ : Union[str, Any] = d_proj
__magic_name__ : Dict = cutoffs + [vocab_size]
__magic_name__ : Tuple = [0] + self.cutoffs
__magic_name__ : Tuple = div_val
__magic_name__ : List[Any] = self.cutoffs[0]
__magic_name__ : Tuple = len(self.cutoffs ) - 1
__magic_name__ : Optional[Any] = self.shortlist_size + self.n_clusters
__magic_name__ : Tuple = keep_order
__magic_name__ : Optional[int] = []
__magic_name__ : List[str] = []
def lowercase ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
if self.n_clusters > 0:
__magic_name__ : Optional[Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=lowerCamelCase , name='''cluster_weight''' )
__magic_name__ : int = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=lowerCamelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__magic_name__ : Optional[Any] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_projs_._{i}''' , )
self.out_projs.append(lowerCamelCase )
else:
self.out_projs.append(lowerCamelCase )
__magic_name__ : Tuple = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_layers_._{i}_._weight''' , )
__magic_name__ : Optional[Any] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__magic_name__ , __magic_name__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__magic_name__ : Tuple = self.d_embed // (self.div_val**i)
__magic_name__ : List[str] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_projs_._{i}''' )
self.out_projs.append(lowerCamelCase )
__magic_name__ : str = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_layers_._{i}_._weight''' , )
__magic_name__ : Union[str, Any] = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=lowerCamelCase , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCamelCase )
@staticmethod
def lowercase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Dict = x
if proj is not None:
__magic_name__ : Union[str, Any] = tf.einsum('''ibd,ed->ibe''' , lowerCamelCase , lowerCamelCase )
return tf.einsum('''ibd,nd->ibn''' , lowerCamelCase , lowerCamelCase ) + b
@staticmethod
def lowercase ( lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
__magic_name__ : Tuple = shape_list(lowerCamelCase )
__magic_name__ : List[str] = tf.range(lp_size[0] , dtype=target.dtype )
__magic_name__ : Optional[Any] = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCamelCase , lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=False ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : Optional[Any] = 0
if self.n_clusters == 0:
__magic_name__ : Optional[Any] = self._logit(lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__magic_name__ : List[str] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase , logits=lowerCamelCase )
__magic_name__ : List[Any] = tf.nn.log_softmax(lowerCamelCase , axis=-1 )
else:
__magic_name__ : List[str] = shape_list(lowerCamelCase )
__magic_name__ : Tuple = []
__magic_name__ : List[str] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__magic_name__ , __magic_name__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__magic_name__ : List[str] = (target >= l_idx) & (target < r_idx)
__magic_name__ : Union[str, Any] = tf.where(lowerCamelCase )
__magic_name__ : Optional[Any] = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) - l_idx
if self.div_val == 1:
__magic_name__ : Tuple = self.out_layers[0][0][l_idx:r_idx]
__magic_name__ : Tuple = self.out_layers[0][1][l_idx:r_idx]
else:
__magic_name__ : Tuple = self.out_layers[i][0]
__magic_name__ : Tuple = self.out_layers[i][1]
if i == 0:
__magic_name__ : Optional[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
__magic_name__ : Optional[Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
__magic_name__ : Optional[Any] = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[0] )
__magic_name__ : Tuple = tf.nn.log_softmax(lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__magic_name__ : Dict = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__magic_name__ : Optional[Any] = self._gather_logprob(lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[i] )
__magic_name__ : List[str] = tf.nn.log_softmax(lowerCamelCase )
__magic_name__ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
__magic_name__ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCamelCase )
if target is not None:
__magic_name__ : Dict = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__magic_name__ : List[str] = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__magic_name__ : str = self._gather_logprob(lowerCamelCase , lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCamelCase , -cur_logprob , shape_list(lowerCamelCase ) )
__magic_name__ : Any = tf.concat(lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
__magic_name__ : Tuple = tf.reduce_mean(lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCamelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 154 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Any ="openai/whisper-base"
lowerCamelCase__ : Any =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
lowerCamelCase__ : Union[str, Any] ="transcriber"
lowerCamelCase__ : List[str] =WhisperProcessor
lowerCamelCase__ : Tuple =WhisperForConditionalGeneration
lowerCamelCase__ : Tuple =["audio"]
lowerCamelCase__ : List[str] =["text"]
def lowercase ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor(lowerCamelCase , return_tensors='''pt''' ).input_features
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
return self.model.generate(inputs=lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
| 154 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :int = AlbertTokenizer
UpperCamelCase :Optional[int] = AlbertTokenizerFast
UpperCamelCase :Tuple = True
UpperCamelCase :Any = True
UpperCamelCase :Optional[int] = True
def _snake_case (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Dict = AlbertTokenizer(__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : Optional[Any] = """this is a test"""
lowerCamelCase__ : str = """this is a test"""
return input_text, output_text
def _snake_case (self ):
lowerCamelCase__ : Any = """<pad>"""
lowerCamelCase__ : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__magic_name__ ) , 30000 )
def _snake_case (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case (self ):
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = self.get_rust_tokenizer()
lowerCamelCase__ : str = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : Optional[Any] = tokenizer.tokenize(__magic_name__ )
lowerCamelCase__ : Any = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase__ : Any = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowerCamelCase__ : Optional[int] = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase__ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase__ : Any = tokenizer.encode(__magic_name__ )
lowerCamelCase__ : Any = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : List[Any] = AlbertTokenizer(__magic_name__ , keep_accents=__magic_name__ )
lowerCamelCase__ : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [48, 25, 21, 1289] )
lowerCamelCase__ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase__ : Dict = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCamelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _snake_case (self ):
lowerCamelCase__ : Any = AlbertTokenizer(__magic_name__ )
lowerCamelCase__ : Any = tokenizer.encode("""sequence builders""" )
lowerCamelCase__ : Any = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowerCamelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _snake_case (self ):
# fmt: off
lowerCamelCase__ : str = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 96 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _A (UpperCamelCase : str ) ->None:
'''simple docstring'''
lowerCamelCase__ ,lowerCamelCase__ : List[str] = analyze_text(UpperCamelCase )
lowerCamelCase__ : Any = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : str = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : List[Any] = single_char_strings[ch]
lowerCamelCase__ : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula.
# print entropy
print(f"{round(-1 * my_fir_sum ):.1f}" )
# two len string
lowerCamelCase__ : str = sum(two_char_strings.values() )
lowerCamelCase__ : int = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCamelCase__ : str = cha + cha
if sequence in two_char_strings:
lowerCamelCase__ : str = two_char_strings[sequence]
lowerCamelCase__ : int = int(UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(UpperCamelCase )
# print second entropy
print(f"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def _A (UpperCamelCase : str ) ->tuple[dict, dict]:
'''simple docstring'''
lowerCamelCase__ : Optional[int] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _A () ->List[str]:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 96 | 1 |
from __future__ import annotations
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : int = data
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
def lowerCamelCase__ ( lowercase ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCamelCase__ ( ): # Main function for testing.
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(1 )
SCREAMING_SNAKE_CASE : Dict = Node(2 )
SCREAMING_SNAKE_CASE : List[Any] = Node(3 )
SCREAMING_SNAKE_CASE : List[Any] = Node(4 )
SCREAMING_SNAKE_CASE : Tuple = Node(5 )
SCREAMING_SNAKE_CASE : str = Node(6 )
SCREAMING_SNAKE_CASE : List[Any] = Node(7 )
SCREAMING_SNAKE_CASE : str = Node(8 )
SCREAMING_SNAKE_CASE : Any = Node(9 )
print(is_full_binary_tree(lowercase ) )
print(depth_of_tree(lowercase ) )
print("Tree is: " )
display(lowercase )
if __name__ == "__main__":
main()
| 62 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 296 | 0 |
from __future__ import annotations
from collections import deque
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : str , a : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(A_ )
self.set_fail_transitions()
def _UpperCAmelCase ( self : Optional[Any] , a : Dict , a : List[str] ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _UpperCAmelCase ( self : Optional[int] , a : List[str] ) -> None:
SCREAMING_SNAKE_CASE = 0
for character in keyword:
SCREAMING_SNAKE_CASE = self.find_next_state(A_ , A_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE = len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE = next_state
self.adlist[current_state]["output"].append(A_ )
def _UpperCAmelCase ( self : Union[str, Any] ) -> None:
SCREAMING_SNAKE_CASE = deque()
for node in self.adlist[0]["next_states"]:
q.append(A_ )
SCREAMING_SNAKE_CASE = 0
while q:
SCREAMING_SNAKE_CASE = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(A_ )
SCREAMING_SNAKE_CASE = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(A_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE = self.adlist[state]["""fail_state"""]
SCREAMING_SNAKE_CASE = self.find_next_state(
A_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def _UpperCAmelCase ( self : Dict , a : Any ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE = {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE = 0
for i in range(len(A_ ) ):
while (
self.find_next_state(A_ , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE = self.adlist[current_state]["""fail_state"""]
SCREAMING_SNAKE_CASE = self.find_next_state(A_ , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE = 0
else:
SCREAMING_SNAKE_CASE = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE = []
result[key].append(i - len(A_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCamelCase_ ( ):
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 450 | 0 |
"""simple docstring"""
import itertools
import math
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Optional[Any] = 2
while True:
if is_prime(UpperCamelCase__ ):
yield num
num += 1
def lowerCAmelCase__ ( UpperCamelCase__ = 1_0_0_0_1 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , UpperCamelCase__ ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 389 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_snake_case = get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
UpperCamelCase : str = '''all_checks'''
UpperCamelCase : Any = '''basic_checks'''
UpperCamelCase : Union[str, Any] = '''no_checks'''
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_a : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_a : List[str] = """ for """ + verification_name if verification_name is not None else """"""
if len(UpperCamelCase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
class UpperCamelCase ( snake_case_ ):
pass
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_a : List[Any] = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCamelCase__ ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCamelCase__ ) )
logger.info("""All the splits matched successfully.""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = True ):
'''simple docstring'''
if record_checksum:
_a : int = shaaaa()
with open(UpperCamelCase__ , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , B"""""" ):
m.update(UpperCamelCase__ )
_a : List[Any] = m.hexdigest()
else:
_a : Any = None
return {"num_bytes": os.path.getsize(UpperCamelCase__ ), "checksum": checksum}
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 389 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Any = 16
lowerCAmelCase : Dict = 32
def A_ ( a , a = 1_6 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : Tuple = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : str = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : str = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : int = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : List[Any] = 8
else:
SCREAMING_SNAKE_CASE_ : Dict = None
return tokenizer.pad(
a , padding='longest' , max_length=a , pad_to_multiple_of=a , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
SCREAMING_SNAKE_CASE_ : int = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : Tuple = mocked_dataloaders # noqa: F811
def A_ ( a , a ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a ) == "1":
SCREAMING_SNAKE_CASE_ : str = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : List[str] = config['lr']
SCREAMING_SNAKE_CASE_ : int = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE_ : Dict = int(config['seed'] )
SCREAMING_SNAKE_CASE_ : int = int(config['batch_size'] )
SCREAMING_SNAKE_CASE_ : Any = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=a )
def inner_training_loop(a ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : int = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(params=model.parameters() , lr=a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = get_dataloaders(a , a )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : List[Any] = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=1_0_0 , num_training_steps=(len(a ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = accelerator.prepare(
a , a , a , a , a )
# Now we train the model
for epoch in range(a ):
model.train()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**a )
SCREAMING_SNAKE_CASE_ : int = outputs.loss
accelerator.backward(a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**a )
SCREAMING_SNAKE_CASE_ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a , references=a , )
SCREAMING_SNAKE_CASE_ : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , a )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a , default=a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
SCREAMING_SNAKE_CASE_ : str = parser.parse_args()
SCREAMING_SNAKE_CASE_ : List[str] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(a , a )
if __name__ == "__main__":
main()
| 353 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : List[str] = (PNDMScheduler,)
SCREAMING_SNAKE_CASE : Dict = (('''num_inference_steps''', 50),)
def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Tuple = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Any = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : Any = new_scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : int = new_scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = new_scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = 10
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.prk_timesteps ):
SCREAMING_SNAKE_CASE_ : Any = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
SCREAMING_SNAKE_CASE_ : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_plms(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Tuple = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
def UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.full_loop()
SCREAMING_SNAKE_CASE_ : Tuple = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.full_loop(set_alpha_to_one=_SCREAMING_SNAKE_CASE , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.full_loop(set_alpha_to_one=_SCREAMING_SNAKE_CASE , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 353 | 1 |
'''simple docstring'''
from timeit import timeit
_lowerCAmelCase = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
lowercase__ = 0
lowercase__ = len(lowercase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
lowercase__ = len(lowercase ) // 2
lowercase__ = len(lowercase )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(lowercase ) )
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if len(lowercase ) <= 2:
return True
if s[0] == s[len(lowercase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
return s == s[::-1]
def _lowerCAmelCase ( lowercase : str ) ->None:
"""simple docstring"""
lowercase__ = F'''all({name}(key) is value for key, value in test_data.items())'''
lowercase__ = F'''from __main__ import test_data, {name}'''
lowercase__ = 5_0_0_0_0_0
lowercase__ = timeit(stmt=lowercase , setup=lowercase , number=lowercase )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 161 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _lowerCAmelCase ( lowercase : Union[dict, list, tuple, torch.Tensor] ) ->List[Tuple[int, ...]]:
"""simple docstring"""
lowercase__ = []
if isinstance(lowercase , lowercase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def _lowerCAmelCase ( lowercase : int , lowercase : Tuple[int, ...] ) ->Tuple[int, ...]:
"""simple docstring"""
lowercase__ = []
for d in reversed(lowercase ):
idx.append(flat_idx % d )
lowercase__ = flat_idx // d
return tuple(reversed(lowercase ) )
@torch.jit.ignore
def _lowerCAmelCase ( lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Optional[Sequence[bool]] = None , lowercase : Optional[Sequence[bool]] = None , ) ->List[Tuple[slice, ...]]:
"""simple docstring"""
def reduce_edge_list(lowercase : List[bool] ) -> None:
lowercase__ = True
for i in range(len(lowercase ) ):
lowercase__ = -1 * (i + 1)
l[reversed_idx] &= tally
lowercase__ = l[reversed_idx]
if start_edges is None:
lowercase__ = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
lowercase__ = [e == (d - 1) for e, d in zip(lowercase , lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowercase__ = []
lowercase__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase , lowercase ):
if s == e:
path_list.append(slice(lowercase , s + 1 ) )
else:
break
lowercase__ = tuple(lowercase )
lowercase__ = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = start[divergence_idx]
return tuple(
path + (slice(lowercase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = end[divergence_idx]
return tuple(
path + (slice(lowercase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowercase__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _lowerCAmelCase ( lowercase : torch.Tensor , lowercase : int , lowercase : int , lowercase : int ) ->torch.Tensor:
"""simple docstring"""
lowercase__ = t.shape[:no_batch_dims]
lowercase__ = list(_flat_idx_to_idx(lowercase , lowercase ) )
# _get_minimal_slice_set is inclusive
lowercase__ = list(_flat_idx_to_idx(flat_end - 1 , lowercase ) )
# Get an ordered list of slices to perform
lowercase__ = _get_minimal_slice_set(
lowercase , lowercase , lowercase , )
lowercase__ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _lowerCAmelCase ( lowercase : Callable , lowercase : Dict[str, Any] , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : Any = None , lowercase : bool = False , ) ->Any:
"""simple docstring"""
if not (len(lowercase ) > 0):
raise ValueError('''Must provide at least one input''' )
lowercase__ = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase )]
lowercase__ = tuple([max(lowercase ) for s in zip(*lowercase )] )
def _prep_inputs(lowercase : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowercase__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowercase__ = tensor_tree_map(_prep_inputs , lowercase )
lowercase__ = None
if _out is not None:
lowercase__ = tensor_tree_map(lambda lowercase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowercase__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowercase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowercase__ = 0
lowercase__ = prepped_outputs
for _ in range(lowercase ):
# Chunk the input
if not low_mem:
lowercase__ = _select_chunk
else:
lowercase__ = partial(
_chunk_slice , flat_start=lowercase , flat_end=min(lowercase , i + chunk_size ) , no_batch_dims=len(lowercase ) , )
lowercase__ = tensor_tree_map(lowercase , lowercase )
# Run the layer on the chunk
lowercase__ = layer(**lowercase )
# Allocate space for the output
if out is None:
lowercase__ = tensor_tree_map(lambda lowercase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowercase )
# Put the chunk in its pre-allocated space
if isinstance(lowercase , lowercase ):
def assign(lowercase : dict , lowercase : dict ) -> None:
for k, v in da.items():
if isinstance(lowercase , lowercase ):
assign(lowercase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowercase__ = da[k]
assign(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for xa, xa in zip(lowercase , lowercase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowercase__ = xa
elif isinstance(lowercase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowercase__ = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
lowercase__ = tensor_tree_map(lambda lowercase : t.view(orig_batch_dims + t.shape[1:] ) , lowercase )
return out
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase = 5_1_2 , )-> Optional[Any]:
lowercase__ = max_chunk_size
lowercase__ = None
lowercase__ = None
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )-> int:
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowercase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowercase__ = [c for c in candidates if c > min_chunk_size]
lowercase__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_lowerCamelCase ) -> bool:
try:
with torch.no_grad():
fn(*_lowerCamelCase , chunk_size=_lowerCamelCase )
return True
except RuntimeError:
return False
lowercase__ = 0
lowercase__ = len(_lowerCamelCase ) - 1
while i > min_viable_chunk_size_index:
lowercase__ = test_chunk_size(candidates[i] )
if not viable:
lowercase__ = (min_viable_chunk_size_index + i) // 2
else:
lowercase__ = i
lowercase__ = (i + len(_lowerCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> bool:
lowercase__ = True
for aa, aa in zip(_lowerCamelCase , _lowerCamelCase ):
assert type(_lowerCamelCase ) == type(_lowerCamelCase )
if isinstance(_lowerCamelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _lowerCamelCase : x[0] )]
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _lowerCamelCase : x[0] )]
consistent &= self._compare_arg_caches(_lowerCamelCase , _lowerCamelCase )
else:
consistent &= aa == aa
return consistent
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )-> int:
lowercase__ = True
lowercase__ = tree_map(lambda _lowerCamelCase : a.shape if isinstance(_lowerCamelCase , torch.Tensor ) else a , _lowerCamelCase , _lowerCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_lowerCamelCase )
lowercase__ = self._compare_arg_caches(self.cached_arg_data , _lowerCamelCase )
else:
# Otherwise, we can reuse the precomputed value
lowercase__ = False
if not consistent:
lowercase__ = self._determine_favorable_chunk_size(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
lowercase__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 161 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = ["pixel_values"]
def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = 1 / 255 , lowercase__ = True , lowercase__ = None , lowercase__ = True , **lowercase__ , ):
"""simple docstring"""
super().__init__(**lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {"shortest_edge": 224}
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : List[str] = resample
SCREAMING_SNAKE_CASE_ : Dict = do_rescale
SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size
SCREAMING_SNAKE_CASE_ : str = do_flip_channel_order
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = PIL.Image.BILINEAR , lowercase__ = None , **lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase__ , size=size["shortest_edge"] , default_to_square=lowercase__ )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase__ , size=(size["height"], size["width"]) , data_format=lowercase__ , **lowercase__ )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
"""simple docstring"""
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
"""simple docstring"""
return flip_channel_order(lowercase__ , data_format=lowercase__ )
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE_ : Tuple = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : int = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : str = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.flip_channel_order(image=lowercase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : int = {"pixel_values": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowercase__ ):
SCREAMING_SNAKE_CASE_ : int = target_sizes.numpy()
SCREAMING_SNAKE_CASE_ : List[Any] = []
for idx in range(len(lowercase__ ) ):
SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase__ )
else:
SCREAMING_SNAKE_CASE_ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=18 , lowercase__=30 , lowercase__=400 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=[0.48145466, 0.4578275, 0.40821073] , lowercase__=[0.26862954, 0.26130258, 0.27577711] , lowercase__=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = num_channels
SCREAMING_SNAKE_CASE_ : Any = image_size
SCREAMING_SNAKE_CASE_ : Tuple = min_resolution
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_resolution
SCREAMING_SNAKE_CASE_ : Tuple = do_resize
SCREAMING_SNAKE_CASE_ : List[str] = size
SCREAMING_SNAKE_CASE_ : str = do_center_crop
SCREAMING_SNAKE_CASE_ : List[str] = crop_size
SCREAMING_SNAKE_CASE_ : int = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE_ : Dict = image_std
SCREAMING_SNAKE_CASE_ : List[Any] = do_convert_rgb
def __lowerCamelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __lowerCamelCase ( self , lowercase__=False , lowercase__=False , lowercase__=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
SCREAMING_SNAKE_CASE_ : str = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
SCREAMING_SNAKE_CASE_ : str = [Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
SCREAMING_SNAKE_CASE_ : List[str] = [torch.from_numpy(lowercase__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
_A = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowercase__ )
@property
def __lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
self.assertTrue(hasattr(lowercase__ , "size" ) )
self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
self.assertTrue(hasattr(lowercase__ , "image_std" ) )
self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __lowerCamelCase ( self ):
"""simple docstring"""
pass
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : int = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
_A = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3
@property
def __lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
self.assertTrue(hasattr(lowercase__ , "size" ) )
self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
self.assertTrue(hasattr(lowercase__ , "image_std" ) )
self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )
def __lowerCamelCase ( self ):
"""simple docstring"""
pass
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 68 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
__a : Optional[int] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__a : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
SCREAMING_SNAKE_CASE = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
__A = {}
if self.train_dir is not None:
__A = self.train_dir
if self.validation_dir is not None:
__A = self.validation_dir
__A = data_files if data_files else None
@dataclass
class __lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase_ )} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE = field(default=lowercase_ , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowercase_ , metadata={"help": "Stride to use for the encoder."} , )
class __lowercase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=192 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : int=4 , UpperCamelCase_ : List[Any]=0.6 ):
"""simple docstring"""
__A = input_size
__A = mask_patch_size
__A = model_patch_size
__A = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
__A = self.input_size // self.mask_patch_size
__A = self.mask_patch_size // self.model_patch_size
__A = self.rand_size**2
__A = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Optional[Any] ):
"""simple docstring"""
__A = np.random.permutation(self.token_count )[: self.mask_count]
__A = np.zeros(self.token_count , dtype=UpperCamelCase_ )
__A = 1
__A = mask.reshape((self.rand_size, self.rand_size) )
__A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def _SCREAMING_SNAKE_CASE ( __lowercase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__A = torch.stack([example["""pixel_values"""] for example in examples] )
__A = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
__A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__A , __A , __A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__A = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
__A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__A = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
__A = ds["""train"""].train_test_split(data_args.train_val_split )
__A = split["""train"""]
__A = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase )
elif model_args.model_name_or_path:
__A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
__A = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__lowercase , """decoder_type""" ):
__A = """simmim"""
# adapt config
__A = model_args.image_size if model_args.image_size is not None else config.image_size
__A = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__A = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
elif model_args.model_name_or_path:
__A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
__A = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__A = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__A = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__A = AutoModelForMaskedImageModeling.from_config(__lowercase )
if training_args.do_train:
__A = ds["""train"""].column_names
else:
__A = ds["""validation"""].column_names
if data_args.image_column_name is not None:
__A = data_args.image_column_name
elif "image" in column_names:
__A = """image"""
elif "img" in column_names:
__A = """img"""
else:
__A = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__A = Compose(
[
Lambda(lambda __lowercase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__A = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(__lowercase : Optional[Any] ):
__A = [transforms(__lowercase ) for image in examples[image_column_name]]
__A = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__A = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
__A = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Initialize our trainer
__A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
__A = None
if training_args.resume_from_checkpoint is not None:
__A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__A = last_checkpoint
__A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__A = trainer.evaluate()
trainer.log_metrics("""eval""" , __lowercase )
trainer.save_metrics("""eval""" , __lowercase )
# Write model card and (optionally) push to hub
__A = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
| 637 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__a : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
__a : Union[str, Any] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "retribert"
def __init__( self : Any , UpperCamelCase_ : int=30_522 , UpperCamelCase_ : Optional[int]=768 , UpperCamelCase_ : Union[str, Any]=8 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : List[Any]=3_072 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : List[str]=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : List[str]=1e-12 , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=128 , UpperCamelCase_ : Union[str, Any]=0 , **UpperCamelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = hidden_act
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = initializer_range
__A = layer_norm_eps
__A = share_encoders
__A = projection_dim
| 637 | 1 |
"""simple docstring"""
from __future__ import annotations
def snake_case (A_ :list[int] ):
'''simple docstring'''
if len(A_ ) == 0:
return array
a, a : Any = min(A_ ), max(A_ )
# Compute the variables
a : int = _max - _min + 1
a, a : Optional[Any] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
a : List[str] = i - _min
a : Optional[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
a : List[Any] = 0
for i in range(A_ ):
while holes_repeat[i] > 0:
a : str = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : Optional[int] = input('Enter numbers separated by comma:\n')
_UpperCamelCase : Union[str, Any] = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 118 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 118 | 1 |
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> int:
"""simple docstring"""
UpperCamelCase = 0
while b > 0:
if b & 1:
UpperCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 280 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( SCREAMING_SNAKE_CASE ):
# A local function to see if a dot lands in the circle.
def is_in_circle(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ) -> bool:
UpperCAmelCase__: Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase__: Optional[int] = mean(
int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase__: Optional[int] = proportion * 4
print(f"The estimated value of pi is {pi_estimate}" )
print(f"The numpy value of pi is {pi}" )
print(f"The total error is {abs(pi - pi_estimate )}" )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE = 0.0 ,SCREAMING_SNAKE_CASE = 1.0 ,):
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ) ) for _ in range(SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE = 0.0 ,SCREAMING_SNAKE_CASE = 1.0 ):
def identity_function(SCREAMING_SNAKE_CASE ) -> float:
return x
UpperCAmelCase__: Tuple = area_under_curve_estimator(
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Optional[Any] = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {expected_value}" )
print(f"Total error is {abs(estimated_value - expected_value )}" )
print("******************" )
def _A ( SCREAMING_SNAKE_CASE ):
def function_to_integrate(SCREAMING_SNAKE_CASE ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase__: Union[str, Any] = area_under_curve_estimator(
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,0.0 ,2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {pi}" )
print(f"Total error is {abs(estimated_value - pi )}" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 113 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 535 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """spiece.model"""}
snake_case = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
snake_case = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Any ,__A : Tuple ,__A : Any=False ,__A : int=False ,__A : List[str]=False ,__A : List[str]=None ,__A : Dict=None ,__A : Dict=None ,__A : Union[str, Any]=None ,__A : Optional[Dict[str, Any]] = None ,**__A : Tuple ,) -> None:
_lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
_lowercase = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowercase = '<|endoftext|>' if eos_token is None else eos_token
_lowercase = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowercase = unk_token if pad_token is None else pad_token
_lowercase = eos_token if bos_token is None else bos_token
else:
_lowercase = '<pad>' if pad_token is None else pad_token
_lowercase = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=__A ,remove_space=__A ,keep_accents=__A ,bos_token=__A ,eos_token=__A ,unk_token=__A ,pad_token=__A ,sp_model_kwargs=self.sp_model_kwargs ,**__A ,)
_lowercase = do_lower_case
_lowercase = remove_space
_lowercase = keep_accents
_lowercase = vocab_file
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowercase = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowercase = re.compile(
F"""[{"".join(map(__A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : List[Any] ) -> List[str]:
_lowercase = self.__dict__.copy()
_lowercase = None
return state
def __setstate__( self : Optional[Any] ,__A : Dict ) -> str:
_lowercase = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowercase = {}
_lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __UpperCAmelCase ( self : List[Any] ) -> int:
return len(self.sp_model )
def __UpperCAmelCase ( self : Optional[Any] ,__A : str ) -> str:
_lowercase = self.non_printing_characters_re.sub('' ,__A )
# Normalize whitespaces
_lowercase = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
_lowercase = unicodedata.normalize('NFC' ,__A )
return text
def __UpperCAmelCase ( self : Optional[Any] ,__A : str ,**__A : Optional[int] ) -> List[str]:
_lowercase = self.preprocess_text(__A )
return self.sp_model.encode(__A ,out_type=__A )
def __UpperCAmelCase ( self : List[Any] ,__A : str ) -> int:
return self.sp_model.PieceToId(__A )
def __UpperCAmelCase ( self : Any ,__A : int ) -> str:
return self.sp_model.IdToPiece(__A )
@staticmethod
def __UpperCAmelCase ( __A : str ) -> str:
return out_string
def __UpperCAmelCase ( self : Tuple ,__A : List[str] ) -> str:
_lowercase = []
_lowercase = ''
_lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowercase = True
_lowercase = []
else:
current_sub_tokens.append(__A )
_lowercase = False
out_string += self.sp_model.decode(__A )
return out_string
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict[str, int]:
_lowercase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self : Optional[int] ,__A : str ,__A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase = os.path.join(
__A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A ,'wb' ) as fi:
_lowercase = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def __UpperCAmelCase ( self : str ,__A : Union[str, List[str]] ,__A : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__A ,__A ):
_lowercase = self.preprocess_text(__A )
_lowercase = self.sp_model.encode(__A )
else:
_lowercase = [self.preprocess_text(__A ) for t in text]
_lowercase = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowercase = torch.tensor(__A )
return token_ids
def __UpperCAmelCase ( self : Optional[Any] ,__A : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__A )
def __UpperCAmelCase ( self : str ,__A : "Conversation" ) -> List[int]:
_lowercase = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
_lowercase = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__A ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=__A ) | 535 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = VideoToVideoSDPipeline
A_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
A_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
A_ : Dict = False
# No `output_type`.
A_ : Optional[Any] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
__magic_name__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__magic_name__ : List[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__magic_name__ : Dict = CLIPTextModel(_A )
__magic_name__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__magic_name__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCAmelCase ( self : List[Any] , _A : Dict , _A : Union[str, Any]=0 ) -> Dict:
# 3 frames
__magic_name__ : str = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('mps' ):
__magic_name__ : Dict = torch.manual_seed(_A )
else:
__magic_name__ : List[Any] = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __lowerCAmelCase ( self : str ) -> Optional[int]:
__magic_name__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Optional[int] = VideoToVideoSDPipeline(**_A )
__magic_name__ : Any = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Dict = self.get_dummy_inputs(_A )
__magic_name__ : Optional[int] = 'np'
__magic_name__ : List[str] = sd_pipe(**_A ).frames
__magic_name__ : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__magic_name__ : int = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCAmelCase ( self : str ) -> Tuple:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
pass
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
__magic_name__ : List[str] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__magic_name__ : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
__magic_name__ : Union[str, Any] = torch.randn((1, 10, 3, 1024, 576) , generator=_A )
__magic_name__ : Optional[Any] = video.to('cuda' )
__magic_name__ : Dict = 'Spiderman is surfing'
__magic_name__ : Tuple = pipe(_A , video=_A , generator=_A , num_inference_steps=3 , output_type='pt' ).frames
__magic_name__ : Dict = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2 | 561 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase :Dict = datasets.utils.logging.get_logger(__name__)
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
A_ : bool = None
A_ : bool = None
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
A_ : Union[str, Any] = datasets.Audio()
A_ : Tuple = """audio"""
A_ : Optional[Any] = AudioFolderConfig
A_ : List[str] # definition at the bottom of the script
A_ : Any = AudioClassification(audio_column="""audio""" , label_column="""label""" )
lowerCAmelCase :List[str] = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase :str = AUDIO_EXTENSIONS | 561 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : Any = 'umt5'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
def __init__( self : Union[str, Any] , lowercase__ : Optional[int]=25_01_12 , lowercase__ : int=5_12 , lowercase__ : List[Any]=64 , lowercase__ : Optional[Any]=10_24 , lowercase__ : List[Any]=8 , lowercase__ : Any=None , lowercase__ : Any=6 , lowercase__ : List[str]=32 , lowercase__ : Union[str, Any]=1_28 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Union[str, Any]=1e-6 , lowercase__ : int=1.0 , lowercase__ : str="gated-gelu" , lowercase__ : Optional[int]=True , lowercase__ : int=True , lowercase__ : Optional[int]="T5Tokenizer" , lowercase__ : Optional[Any]=True , lowercase__ : List[Any]=0 , lowercase__ : Tuple=1 , lowercase__ : Dict=0 , **lowercase__ : Optional[int] , ) ->Optional[int]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowercase__ , tokenizer_class=lowercase__ , tie_word_embeddings=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
_lowercase = vocab_size
_lowercase = d_model
_lowercase = d_kv
_lowercase = d_ff
_lowercase = num_layers
_lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowercase = num_heads
_lowercase = relative_attention_num_buckets
_lowercase = relative_attention_max_distance
_lowercase = dropout_rate
_lowercase = layer_norm_epsilon
_lowercase = initializer_factor
_lowercase = feed_forward_proj
_lowercase = use_cache
_lowercase = self.feed_forward_proj.split("""-""")
_lowercase = act_info[-1]
_lowercase = act_info[0] == """gated"""
if len(lowercase__) > 1 and act_info[0] != "gated" or len(lowercase__) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""")
if feed_forward_proj == "gated-gelu":
_lowercase = """gelu_new"""
@property
def _UpperCAmelCase ( self : Tuple) ->str:
"""simple docstring"""
return self.d_model
@property
def _UpperCAmelCase ( self : str) ->List[Any]:
"""simple docstring"""
return self.num_heads
@property
def _UpperCAmelCase ( self : List[str]) ->str:
"""simple docstring"""
return self.num_layers
class __a ( _snake_case ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _UpperCAmelCase ( self : Optional[Any]) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_lowercase = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
_lowercase = """past_encoder_sequence + sequence"""
_lowercase = {0: """batch"""}
_lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowercase = {0: """batch""", 1: """decoder_sequence"""}
_lowercase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction="""inputs""")
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _UpperCAmelCase ( self : Optional[int]) ->int:
"""simple docstring"""
return 13
@property
def _UpperCAmelCase ( self : Dict) ->float:
"""simple docstring"""
return 5e-4
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['GLPNFeatureExtractor']
_lowerCamelCase = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 572 | 0 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCamelCase ( nn.Module ):
def __init__( self ):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(3 , 4 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.BatchNormad(4 )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(4 , 5 )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) )
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
return output + 1
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = ModelForTest()
_SCREAMING_SNAKE_CASE : Tuple = ModelHook()
add_hook_to_module(snake_case__ , snake_case__ )
self.assertEqual(test_model._hf_hook , snake_case__ )
self.assertTrue(hasattr(snake_case__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(snake_case__ )
self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) )
self.assertFalse(hasattr(snake_case__ , "_old_forward" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = ModelForTest()
_SCREAMING_SNAKE_CASE : Optional[Any] = ModelHook()
add_hook_to_module(snake_case__ , snake_case__ )
add_hook_to_module(snake_case__ , snake_case__ , append=snake_case__ )
self.assertEqual(isinstance(test_model._hf_hook , snake_case__ ) , snake_case__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(snake_case__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(snake_case__ )
self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) )
self.assertFalse(hasattr(snake_case__ , "_old_forward" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = ModelForTest()
_SCREAMING_SNAKE_CASE : Any = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Any = test_model(x + 1 )
_SCREAMING_SNAKE_CASE : List[str] = test_model(x + 2 )
_SCREAMING_SNAKE_CASE : Any = PreForwardHook()
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = test_model(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_SCREAMING_SNAKE_CASE : Any = PreForwardHook()
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : str = test_model(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_SCREAMING_SNAKE_CASE : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : Dict = test_model(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-5 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = ModelForTest()
_SCREAMING_SNAKE_CASE : Any = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = test_model(snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[int] = PostForwardHook()
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = test_model(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_SCREAMING_SNAKE_CASE : Tuple = PostForwardHook()
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = test_model(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_SCREAMING_SNAKE_CASE : Tuple = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = test_model(snake_case__ )
assert torch.allclose(snake_case__ , output + 2 , atol=1E-5 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ModelForTest()
_SCREAMING_SNAKE_CASE : str = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = test_model(snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = PostForwardHook()
add_hook_to_module(snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = test_model(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : Any = test_model(snake_case__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_SCREAMING_SNAKE_CASE : Tuple = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : str = model(snake_case__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(snake_case__ , AlignDevicesHook(io_same_device=snake_case__ ) )
_SCREAMING_SNAKE_CASE : Tuple = torch.randn(2 , 3 ).to(0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case__ )
self.assertEqual(output.device , torch.device(0 ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_SCREAMING_SNAKE_CASE : int = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_SCREAMING_SNAKE_CASE : Any = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , snake_case__ )
_SCREAMING_SNAKE_CASE : Dict = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : List[Any] = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_SCREAMING_SNAKE_CASE : List[str] = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_SCREAMING_SNAKE_CASE : Optional[Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_SCREAMING_SNAKE_CASE : List[str] = torch.device(snake_case__ )
self.assertEqual(model.batchnorm.running_mean.device , snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ , offload_buffers=snake_case__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_SCREAMING_SNAKE_CASE : Dict = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_SCREAMING_SNAKE_CASE : Dict = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_SCREAMING_SNAKE_CASE : str = torch.device(snake_case__ )
self.assertEqual(model.batchnorm.running_mean.device , snake_case__ )
_SCREAMING_SNAKE_CASE : str = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : str = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() , offload_buffers=snake_case__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn(2 , 3 )
_SCREAMING_SNAKE_CASE : Tuple = model(snake_case__ )
self.assertEqual(output.device , snake_case__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
| 572 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : float ) -> float:
if edge <= 0 or not isinstance(lowerCamelCase__, lowerCamelCase__ ):
raise ValueError("Length must be a positive." )
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _lowerCAmelCase ( lowerCamelCase__ : float ) -> float:
if edge <= 0 or not isinstance(lowerCamelCase__, lowerCamelCase__ ):
raise ValueError("Length must be a positive." )
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 572 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = prime_factors(__A )
if is_square_free(__A ):
return -1 if len(__A ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 0 |
import socket
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =socket.socket(socket.AF_INET ,socket.SOCK_STREAM)
_SCREAMING_SNAKE_CASE =socket.gethostname()
_SCREAMING_SNAKE_CASE =1_2312
sock.connect((host, port))
sock.send(b'''Hello server!''')
with open('''Received_file''' ,'''wb''') as out_file:
print('''File opened''')
print('''Receiving data...''')
while True:
_SCREAMING_SNAKE_CASE =sock.recv(1024)
if not data:
break
out_file.write(a__)
print('''Successfully received the file''')
sock.close()
print('''Connection closed''')
if __name__ == "__main__":
main() | 691 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 691 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'trocr'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=16 , lowerCamelCase=4096 , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : Optional[int] = decoder_layers
snake_case__ : List[str] = decoder_attention_heads
snake_case__ : List[str] = decoder_ffn_dim
snake_case__ : Optional[int] = activation_function
snake_case__ : Any = max_position_embeddings
snake_case__ : List[Any] = dropout
snake_case__ : Any = attention_dropout
snake_case__ : Tuple = activation_dropout
snake_case__ : Optional[int] = init_std
snake_case__ : int = decoder_layerdrop
snake_case__ : Any = use_cache
snake_case__ : Optional[int] = scale_embedding
snake_case__ : Optional[Any] = use_learned_position_embeddings
snake_case__ : List[str] = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , decoder_start_token_id=lowerCamelCase , **lowerCamelCase , )
| 719 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def __UpperCamelCase ( a : Tuple , a : List[str]=BITS ) ->str:
snake_case = x.device
snake_case = (x * 255).int().clamp(0 , 255 )
snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=a )
snake_case = rearrange(a , '''d -> d 1 1''' )
snake_case = rearrange(a , '''b c h w -> b c 1 h w''' )
snake_case = ((x & mask) != 0).float()
snake_case = rearrange(a , '''b c d h w -> b (c d) h w''' )
snake_case = bits * 2 - 1
return bits
def __UpperCamelCase ( a : Dict , a : str=BITS ) ->Optional[Any]:
snake_case = x.device
snake_case = (x > 0).int()
snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=a , dtype=torch.intaa )
snake_case = rearrange(a , '''d -> d 1 1''' )
snake_case = rearrange(a , '''b (c d) h w -> b c d h w''' , d=8 )
snake_case = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
def __UpperCamelCase ( self : Optional[int] , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : float = 0.0 , a : bool = True , a : Dict=None , a : bool = True , ) ->Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
snake_case = self.alphas_cumprod[timestep]
snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
snake_case = self.bit_scale
if self.config.clip_sample:
snake_case = torch.clamp(a , -scale , a )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
snake_case = self._get_variance(a , a )
snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
snake_case = model_output.device if torch.is_tensor(a ) else '''cpu'''
snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=a ).to(a )
snake_case = self._get_variance(a , a ) ** 0.5 * eta * noise
snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=a , pred_original_sample=a )
def __UpperCamelCase ( self : Any , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : Optional[int]="epsilon" , a : str=None , a : bool = True , ) ->Union[DDPMSchedulerOutput, Tuple]:
snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case , snake_case = torch.split(a , sample.shape[1] , dim=1 )
else:
snake_case = None
# 1. compute alphas, betas
snake_case = self.alphas_cumprod[t]
snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case = 1 - alpha_prod_t
snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
snake_case = self.bit_scale
if self.config.clip_sample:
snake_case = torch.clamp(a , -scale , a )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case = 0
if t > 0:
snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=a ).to(model_output.device )
snake_case = (self._get_variance(a , predicted_variance=a ) ** 0.5) * noise
snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=a , pred_original_sample=a )
class _lowercase ( __a ):
def __init__( self , A__ , A__ , A__ = 1.0 , ) -> Dict:
super().__init__()
snake_case = bit_scale
snake_case = (
ddim_bit_scheduler_step if isinstance(A__ , A__ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A__ , scheduler=A__ )
@torch.no_grad()
def __call__( self , A__ = 2_56 , A__ = 2_56 , A__ = 50 , A__ = None , A__ = 1 , A__ = "pil" , A__ = True , **A__ , ) -> Union[Tuple, ImagePipelineOutput]:
snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=A__ , )
snake_case = decimal_to_bits(A__ ) * self.bit_scale
snake_case = latents.to(self.device )
self.scheduler.set_timesteps(A__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
snake_case = self.unet(A__ , A__ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case = self.scheduler.step(A__ , A__ , A__ ).prev_sample
snake_case = bits_to_decimal(A__ )
if output_type == "pil":
snake_case = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
| 342 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( a : Union[tf.Tensor, np.ndarray] ) ->List[int]:
if isinstance(a , np.ndarray ):
return list(tensor.shape )
snake_case = tf.shape(a )
if tensor.shape == tf.TensorShape(a ):
return dynamic
snake_case = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(a )]
def __UpperCamelCase ( a : tf.Tensor , a : Optional[int] = None , a : Optional[str] = None ) ->tf.Tensor:
return tf.nn.softmax(logits=logits + 1e-9 , axis=a , name=a )
def __UpperCamelCase ( a : List[str] , a : Union[str, Any] , a : Tuple , a : List[str]=1e-5 , a : Any=-1 ) ->Dict:
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a , a ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
snake_case , snake_case = tf.nn.moments(a , axes=[axis] , keepdims=a )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
snake_case = [1] * inputs.shape.rank
snake_case = shape_list(a )[axis]
snake_case = tf.reshape(a , a )
snake_case = tf.reshape(a , a )
# Compute layer normalization using the batch_normalization
# function.
snake_case = tf.nn.batch_normalization(
a , a , a , offset=a , scale=a , variance_epsilon=a , )
return outputs
def __UpperCamelCase ( a : Tuple , a : Union[str, Any]=0 , a : List[str]=-1 ) ->int:
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
snake_case = tf.shape(a )
snake_case = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
snake_case = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(a , a )
def __UpperCamelCase ( a : tf.Tensor ) ->tf.Tensor:
if not isinstance(a , tf.Tensor ):
snake_case = tf.convert_to_tensor(a ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
snake_case = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
snake_case = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
snake_case = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __UpperCamelCase ( a : tf.Tensor , a : int , a : str = "input_ids" ) ->None:
tf.debugging.assert_less(
a , tf.cast(a , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(a )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __UpperCamelCase ( a : Tuple , a : List[str] , a : Tuple ) ->Dict:
snake_case = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
snake_case = [x for x in data if len(a ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
snake_case = np.asarray(a )
snake_case = 1
snake_case = np.array_split(a , a )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
snake_case = np.array_split(a , a )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(a ):
snake_case = chunk_data
else:
snake_case = data
def __UpperCamelCase ( a : Optional[int] , a : Tuple ) ->Tuple:
if name in group.attrs:
snake_case = [n.decode('''utf8''' ) if hasattr(a , '''decode''' ) else n for n in group.attrs[name]]
else:
snake_case = []
snake_case = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(a , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __UpperCamelCase ( a : Any ) ->List[Any]:
def _expand_single_ad_tensor(a : List[Any] ):
if isinstance(a , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(a , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , a )
| 342 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ = "dpr"
def __init__( self , _SCREAMING_SNAKE_CASE=30_522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3_072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE = 0 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = projection_dim
__UpperCamelCase = position_embedding_type
| 718 |
def _a ( __lowercase , __lowercase = 0 ) -> list:
"""simple docstring"""
__UpperCamelCase = length or len(__lowercase )
__UpperCamelCase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__UpperCamelCase , __UpperCamelCase = list_data[i + 1], list_data[i]
__UpperCamelCase = True
return list_data if not swapped else bubble_sort(__lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = int(A__ )
assert noofclusters < len(A__ )
# Find out the dimensionality
__lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
__lowercase = list(range(len(A__ ) ) )
shuffle(A__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(A__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__lowercase = tf.placeholder('''float64''' , [dim] )
__lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(A__ , A__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__lowercase = [tf.Variable(0 ) for i in range(len(A__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__lowercase = tf.placeholder('''int32''' )
__lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(A__ , A__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__lowercase = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__lowercase = tf.reduce_mean(A__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__lowercase = tf.placeholder('''float''' , [dim] )
__lowercase = tf.placeholder('''float''' , [dim] )
__lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(A__ , A__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__lowercase = tf.placeholder('''float''' , [noofclusters] )
__lowercase = tf.argmin(A__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(A__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__lowercase = 100
for _ in range(A__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(A__ ) ):
__lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__lowercase = [
sess.run(A__ , feed_dict={va: vect, va: sess.run(A__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__lowercase = sess.run(
A__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(A__ ):
# Collect all the vectors assigned to this cluster
__lowercase = [
vectors[i]
for i in range(len(A__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__lowercase = sess.run(
A__ , feed_dict={mean_input: array(A__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__lowercase = sess.run(A__ )
__lowercase = sess.run(A__ )
return centroids, assignments
| 41 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
A_ : int =np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
A_ : Tuple =[0, 2_5, 5_0]
A_ : int =[2_5, 5_0, 7_5]
A_ : List[str] =fuzz.membership.trimf(X, abca)
A_ : Any =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
A_ : Optional[Any] =np.ones(7_5)
A_ : int =np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
A_ : Optional[Any] =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
A_ : Union[str, Any] =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
A_ : List[Any] =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
A_ : int =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
A_ : Optional[Any] =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
A_ : List[Any] =young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
A_ : Union[str, Any] =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
A_ : Optional[Any] =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 650 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowercase_ = logging.get_logger(__name__)
class __a ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[int] , *snake_case_ : str , **snake_case_ : List[str])-> None:
warnings.warn(
"""The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DPTImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_)
| 712 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase_ = logging.get_logger(__name__)
class __a ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : Union[str, Any])-> None:
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_)
| 456 | 0 |
def lowerCAmelCase_ ( _snake_case : Dict ) -> Any:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCAmelCase_ ( _snake_case : dict[int, list[int]] ) -> list[tuple[int, int]]:
'''simple docstring'''
__magic_name__ : List[Any] = 0
__magic_name__ : Tuple = len(_snake_case ) # No of vertices in graph
__magic_name__ : Union[str, Any] = [0] * n
__magic_name__ : int = [False] * n
def dfs(_snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Dict ):
__magic_name__ : Dict = True
__magic_name__ : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_snake_case , _snake_case , _snake_case , id_ )
__magic_name__ : List[str] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__magic_name__ : Any = min(low[at] , low[to] )
__magic_name__ : list[tuple[int, int]] = []
for i in range(_snake_case ):
if not visited[i]:
dfs(_snake_case , -1 , _snake_case , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 |
from __future__ import annotations
from typing import Any
class _snake_case ( snake_case ):
pass
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Any = data
__magic_name__ : Node | None = None
def __iter__( self ):
__magic_name__ : Any = self
__magic_name__ : Union[str, Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_a )
yield node.data
__magic_name__ : Any = node.next_node
@property
def SCREAMING_SNAKE_CASE ( self ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case : str = Node(1)
snake_case : Dict = Node(2)
snake_case : List[str] = Node(3)
snake_case : Union[str, Any] = Node(4)
print(root_node.has_loop) # False
snake_case : List[Any] = root_node.next_node
print(root_node.has_loop) # True
snake_case : Union[str, Any] = Node(5)
snake_case : Any = Node(6)
snake_case : Any = Node(5)
snake_case : Optional[int] = Node(6)
print(root_node.has_loop) # False
snake_case : str = Node(1)
print(root_node.has_loop) # False
| 124 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A__: Union[str, Any] = 3
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
print("""Generating primitive root of p""" )
while True:
_a : List[str] =random.randrange(3 ,__A )
if pow(__A ,2 ,__A ) == 1:
continue
if pow(__A ,__A ,__A ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
_a : Any =rabin_miller.generate_large_prime(__A ) # select large prime number.
_a : List[str] =primitive_root(__A ) # one primitive root on modulo p.
_a : List[str] =random.randrange(3 ,__A ) # private_key -> have to be greater than 2 for safety.
_a : List[str] =cryptomath.find_mod_inverse(pow(__A ,__A ,__A ) ,__A )
_a : Union[str, Any] =(key_size, e_a, e_a, p)
_a : Optional[Any] =(key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : int ) -> None:
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print("""\nWARNING:""" )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
_a : str =generate_key(__A )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" ,"""w""" ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" ,"""w""" ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 708 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str = " " ) -> list:
_a : int =[]
_a : Tuple =0
for index, char in enumerate(_UpperCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_a : Union[str, Any] =index + 1
elif index + 1 == len(_UpperCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 506 | 0 |
SCREAMING_SNAKE_CASE : Optional[Any] = tuple[float, float, float]
SCREAMING_SNAKE_CASE : int = tuple[float, float, float]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Vectorad:
_lowercase : int = end_pointa[0] - end_pointa[0]
_lowercase : List[Any] = end_pointa[1] - end_pointa[1]
_lowercase : Optional[int] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Vectorad:
_lowercase : Optional[int] = ab[1] * ac[2] - ab[2] * ac[1] # *i
_lowercase : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
_lowercase : Tuple = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> bool:
return tuple(round(lowerCamelCase_ , lowerCamelCase_ ) for x in vector ) == (0, 0, 0)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 10 ) -> bool:
_lowercase : int = create_vector(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Any = create_vector(lowerCamelCase_ , lowerCamelCase_ )
return is_zero_vector(get_ad_vectors_cross(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
| 89 |
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A ):
__a = set_counts
__a = max(__A )
__a = len(__A )
__a = [1] * num_sets
__a = list(range(__A ) )
def snake_case_ ( self , __A , __A ):
__a = self.get_parent(__A )
__a = self.get_parent(__A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__a = 0
__a = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__a = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__a = 0
__a = src_parent
__a = self.set_counts[src_parent]
__a = max(self.max_set , __A )
return True
def snake_case_ ( self , __A ):
if self.parents[disj_set] == disj_set:
return disj_set
__a = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 99 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ):
__a = SwinvaConfig()
__a = swinva_name.split("""_""" )
__a = name_split[1]
if "to" in name_split[3]:
__a = int(name_split[3][-3:] )
else:
__a = int(name_split[3] )
if "to" in name_split[2]:
__a = int(name_split[2][-2:] )
else:
__a = int(name_split[2][6:] )
if model_size == "tiny":
__a = 96
__a = (2, 2, 6, 2)
__a = (3, 6, 12, 24)
elif model_size == "small":
__a = 96
__a = (2, 2, 18, 2)
__a = (3, 6, 12, 24)
elif model_size == "base":
__a = 128
__a = (2, 2, 18, 2)
__a = (4, 8, 16, 32)
else:
__a = 192
__a = (2, 2, 18, 2)
__a = (6, 12, 24, 48)
if "to" in swinva_name:
__a = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__a = 21841
__a = '''huggingface/label-files'''
__a = '''imagenet-22k-id2label.json'''
__a = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
__a = {int(a_ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
else:
__a = 1000
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
__a = {int(a_ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = img_size
__a = num_classes
__a = embed_dim
__a = depths
__a = num_heads
__a = window_size
return config
def UpperCamelCase ( _lowerCAmelCase : List[str] ):
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__a = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__a = '''encoder.''' + name
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
__a = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
__a = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
__a = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
__a = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
__a = '''layernorm.weight'''
if name == "norm.bias":
__a = '''layernorm.bias'''
if "head" in name:
__a = name.replace("""head""" , """classifier""" )
else:
__a = '''swinv2.''' + name
return name
def UpperCamelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(a_ )
if "mask" in key:
continue
elif "qkv" in key:
__a = key.split(""".""" )
__a = int(key_split[1] )
__a = int(key_split[3] )
__a = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[
dim : dim * 2
]
__a = val[-dim:]
else:
__a = val
return orig_state_dict
def UpperCamelCase ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ):
__a = timm.create_model(a_ , pretrained=a_ )
timm_model.eval()
__a = get_swinva_config(a_ )
__a = SwinvaForImageClassification(a_ )
model.eval()
__a = convert_state_dict(timm_model.state_dict() , a_ )
model.load_state_dict(a_ )
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
__a = Image.open(requests.get(a_ , stream=a_ ).raw )
__a = image_processor(images=a_ , return_tensors="""pt""" )
__a = timm_model(inputs["""pixel_values"""] )
__a = model(**a_ ).logits
assert torch.allclose(a_ , a_ , atol=1E-3 )
print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a_ )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__A = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 718 | """simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__A = """scheduler_config.json"""
class a ( A_ ):
A_ : int = 1
A_ : Any = 2
A_ : List[str] = 3
A_ : int = 4
A_ : List[str] = 5
A_ : Optional[int] = 6
A_ : Optional[int] = 7
A_ : int = 8
A_ : Union[str, Any] = 9
A_ : int = 10
A_ : Dict = 11
A_ : Union[str, Any] = 12
A_ : Tuple = 13
A_ : Optional[Any] = 14
@dataclass
class a ( A_ ):
A_ : torch.FloatTensor
class a :
A_ : str = SCHEDULER_CONFIG_NAME
A_ : Union[str, Any] = []
A_ : Any = True
@classmethod
def lowerCAmelCase_ ( cls : Dict , lowerCamelCase_ : Dict[str, Any] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : List[str]=False , **lowerCamelCase_ : Union[str, Any] , ) -> Any:
__a , __a , __a = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase_ , subfolder=lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ , return_commit_hash=lowerCamelCase_ , **lowerCamelCase_ , )
return cls.from_config(lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Any ) -> Dict:
self.save_config(save_directory=lowerCamelCase_ , push_to_hub=lowerCamelCase_ , **lowerCamelCase_ )
@property
def lowerCAmelCase_ ( self : List[str] ) -> Any:
return self._get_compatibles()
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] ) -> str:
__a = list(set([cls.__name__] + cls._compatibles ) )
__a = importlib.import_module(__name__.split(""".""" )[0] )
__a = [
getattr(lowerCamelCase_ , lowerCamelCase_ ) for c in compatible_classes_str if hasattr(lowerCamelCase_ , lowerCamelCase_ )
]
return compatible_classes
| 173 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
__lowerCAmelCase = {
"169M": 7_68,
"430M": 10_24,
"1B5": 20_48,
"3B": 25_60,
"7B": 40_96,
"14B": 51_20,
}
def __UpperCamelCase ( lowercase_ : str ):
"""simple docstring"""
a_ = list(state_dict.keys() )
for name in state_dict_keys:
a_ = state_dict.pop(lowerCamelCase_ )
# emb -> embedding
if name.startswith('emb.' ):
a_ = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
a_ = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
a_ = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , lowerCamelCase_ )
# ffn -> feed_forward
a_ = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , lowerCamelCase_ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
a_ = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
a_ = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
a_ = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
a_ = "rwkv." + name
a_ = weight
return state_dict
def __UpperCamelCase ( lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int]=None , lowercase_ : str=None , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
a_ = 50_277
a_ = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
a_ = PreTrainedTokenizerFast(tokenizer_file=lowerCamelCase_ )
a_ = len(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
# 2. Build the config
a_ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
a_ = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
a_ = RwkvConfig(
vocab_size=lowerCamelCase_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowerCamelCase_ )
# 3. Download model file then convert state_dict
a_ = hf_hub_download(lowerCamelCase_ , lowerCamelCase_ )
a_ = torch.load(lowerCamelCase_ , map_location='cpu' )
a_ = convert_state_dict(lowerCamelCase_ )
# 4. Split in shards and save
a_ = shard_checkpoint(lowerCamelCase_ )
for shard_file, shard in shards.items():
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
if index is not None:
a_ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
# Save the index as well
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
a_ = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + "\n"
f.write(lowerCamelCase_ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' )
a_ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
a_ = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
a_ = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ )
model.push_to_hub(lowerCamelCase_ , max_shard_size='2GB' )
tokenizer.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 536 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
A = logging.get_logger(__name__)
A = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = """bloom"""
__magic_name__ = ["""past_key_values"""]
__magic_name__ = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple , _lowercase : List[Any]=250880 , _lowercase : int=64 , _lowercase : Optional[Any]=2 , _lowercase : Dict=8 , _lowercase : List[str]=1E-5 , _lowercase : List[Any]=0.02 , _lowercase : Optional[Any]=True , _lowercase : Dict=1 , _lowercase : Union[str, Any]=2 , _lowercase : str=False , _lowercase : List[Any]=0.0 , _lowercase : Tuple=0.0 , _lowercase : Dict=1 , _lowercase : int=False , **_lowercase : int , ) -> List[str]:
snake_case : Any = vocab_size
# Backward compatibility with n_embed kwarg
snake_case : Any = kwargs.pop("n_embed" , _lowercase )
snake_case : Tuple = hidden_size if n_embed is None else n_embed
snake_case : Optional[Any] = n_layer
snake_case : Optional[Any] = n_head
snake_case : Union[str, Any] = layer_norm_epsilon
snake_case : int = initializer_range
snake_case : int = use_cache
snake_case : int = pretraining_tp
snake_case : Tuple = apply_residual_connection_post_layernorm
snake_case : Union[str, Any] = hidden_dropout
snake_case : Optional[Any] = attention_dropout
snake_case : List[Any] = bos_token_id
snake_case : Any = eos_token_id
snake_case : Optional[Any] = slow_but_exact
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = version.parse("""1.12""")
def __init__( self : int , _lowercase : PretrainedConfig , _lowercase : str = "default" , _lowercase : List[PatchingSpec] = None , _lowercase : bool = False , ) -> Dict:
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
if not getattr(self._config , "pad_token_id" , _lowercase ):
# TODO: how to do that better?
snake_case : int = 0
@property
def __lowercase ( self : str ) -> Mapping[str, Mapping[int, str]]:
snake_case : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_lowercase , direction="inputs" , inverted_values_shape=_lowercase )
snake_case : Optional[int] = {0: "batch", 1: "past_sequence + sequence"}
else:
snake_case : Dict = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __lowercase ( self : int ) -> int:
return self._config.n_layer
@property
def __lowercase ( self : Dict ) -> int:
return self._config.n_head
@property
def __lowercase ( self : Union[str, Any] ) -> float:
return 1E-3
def __lowercase ( self : List[str] , _lowercase : "PreTrainedTokenizer" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
snake_case : int = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
snake_case : Any = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case , snake_case : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case : Optional[Any] = seqlen + 2
snake_case : Optional[int] = self._config.hidden_size // self.num_attention_heads
snake_case : List[str] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case : Dict = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
snake_case : Tuple = common_inputs["attention_mask"]
if self.use_past:
snake_case : Any = ordered_inputs["attention_mask"].dtype
snake_case : List[str] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def __lowercase ( self : str ) -> int:
return 13
| 449 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : str = tau * frequency / samplerate
lowercase__ : Dict = sin(lowerCamelCase__ )
lowercase__ : Optional[Any] = cos(lowerCamelCase__ )
lowercase__ : Optional[int] = _sin / (2 * q_factor)
lowercase__ : List[str] = (1 - _cos) / 2
lowercase__ : Union[str, Any] = 1 - _cos
lowercase__ : Dict = 1 + alpha
lowercase__ : Dict = -2 * _cos
lowercase__ : Tuple = 1 - alpha
lowercase__ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : Union[str, Any] = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Tuple = cos(lowerCamelCase__ )
lowercase__ : int = _sin / (2 * q_factor)
lowercase__ : Optional[Any] = (1 + _cos) / 2
lowercase__ : Optional[Any] = -1 - _cos
lowercase__ : str = 1 + alpha
lowercase__ : str = -2 * _cos
lowercase__ : int = 1 - alpha
lowercase__ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : Union[str, Any] = tau * frequency / samplerate
lowercase__ : Tuple = sin(lowerCamelCase__ )
lowercase__ : List[Any] = cos(lowerCamelCase__ )
lowercase__ : Tuple = _sin / (2 * q_factor)
lowercase__ : Tuple = _sin / 2
lowercase__ : Dict = 0
lowercase__ : Optional[Any] = -ba
lowercase__ : List[Any] = 1 + alpha
lowercase__ : Optional[int] = -2 * _cos
lowercase__ : List[Any] = 1 - alpha
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : int = tau * frequency / samplerate
lowercase__ : List[str] = sin(lowerCamelCase__ )
lowercase__ : List[str] = cos(lowerCamelCase__ )
lowercase__ : Dict = _sin / (2 * q_factor)
lowercase__ : Optional[Any] = 1 - alpha
lowercase__ : str = -2 * _cos
lowercase__ : List[Any] = 1 + alpha
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : List[Any] = tau * frequency / samplerate
lowercase__ : List[str] = sin(lowerCamelCase__ )
lowercase__ : Optional[int] = cos(lowerCamelCase__ )
lowercase__ : str = _sin / (2 * q_factor)
lowercase__ : Optional[int] = 10 ** (gain_db / 40)
lowercase__ : Tuple = 1 + alpha * big_a
lowercase__ : List[str] = -2 * _cos
lowercase__ : int = 1 - alpha * big_a
lowercase__ : Optional[int] = 1 + alpha / big_a
lowercase__ : Optional[int] = -2 * _cos
lowercase__ : str = 1 - alpha / big_a
lowercase__ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : str = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Any = cos(lowerCamelCase__ )
lowercase__ : Dict = _sin / (2 * q_factor)
lowercase__ : Tuple = 10 ** (gain_db / 40)
lowercase__ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase__ : Dict = (big_a + 1) + (big_a - 1) * _cos
lowercase__ : Dict = (big_a - 1) - (big_a + 1) * _cos
lowercase__ : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase__ : Optional[int] = 2 * sqrt(lowerCamelCase__ ) * alpha
lowercase__ : int = big_a * (pmc + aaa)
lowercase__ : List[str] = 2 * big_a * mpc
lowercase__ : Optional[Any] = big_a * (pmc - aaa)
lowercase__ : Tuple = ppmc + aaa
lowercase__ : Any = -2 * pmpc
lowercase__ : Optional[int] = ppmc - aaa
lowercase__ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : Tuple = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Optional[int] = cos(lowerCamelCase__ )
lowercase__ : Any = _sin / (2 * q_factor)
lowercase__ : Dict = 10 ** (gain_db / 40)
lowercase__ : List[str] = (big_a + 1) - (big_a - 1) * _cos
lowercase__ : Dict = (big_a + 1) + (big_a - 1) * _cos
lowercase__ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase__ : Tuple = (big_a - 1) + (big_a + 1) * _cos
lowercase__ : int = 2 * sqrt(lowerCamelCase__ ) * alpha
lowercase__ : Optional[Any] = big_a * (ppmc + aaa)
lowercase__ : Tuple = -2 * big_a * pmpc
lowercase__ : List[Any] = big_a * (ppmc - aaa)
lowercase__ : Optional[Any] = pmc + aaa
lowercase__ : Tuple = 2 * mpc
lowercase__ : int = pmc - aaa
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def A_ ( snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:int = 1.5
SCREAMING_SNAKE_CASE:Optional[Any] = int(factor * num_class_images )
SCREAMING_SNAKE_CASE:Any = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=snake_case , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=snake_case )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
SCREAMING_SNAKE_CASE:Any = client.query(text=snake_case )
if len(snake_case ) >= factor * num_class_images or num_images > 1e4:
break
else:
SCREAMING_SNAKE_CASE:Union[str, Any] = int(factor * num_images )
SCREAMING_SNAKE_CASE:List[Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=snake_case , aesthetic_weight=0.1 , )
SCREAMING_SNAKE_CASE:int = 0
SCREAMING_SNAKE_CASE:Dict = 0
SCREAMING_SNAKE_CASE:Optional[int] = tqdm(desc="downloading real regularization images" , total=snake_case )
with open(F'''{class_data_dir}/caption.txt''' , "w" ) as fa, open(F'''{class_data_dir}/urls.txt''' , "w" ) as fa, open(
F'''{class_data_dir}/images.txt''' , "w" ) as fa:
while total < num_class_images:
SCREAMING_SNAKE_CASE:List[str] = class_images[count]
count += 1
try:
SCREAMING_SNAKE_CASE:List[Any] = requests.get(images["url"] )
if img.status_code == 200:
SCREAMING_SNAKE_CASE:Optional[int] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def A_ ( ):
SCREAMING_SNAKE_CASE:List[Any] = argparse.ArgumentParser("" , add_help=snake_case )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=snake_case , type=snake_case )
parser.add_argument("--class_data_dir" , help="path to save images" , required=snake_case , type=snake_case )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=snake_case )
return parser.parse_args()
if __name__ == "__main__":
A_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 143 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
A_ = "examples/"
A_ = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A_ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A_ = "README.md"
def A_ ( snake_case , snake_case , snake_case ):
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[str] = f.read()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Any = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE:Tuple = replace.replace("VERSION" , snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = re_pattern.sub(snake_case , snake_case )
with open(snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(snake_case )
def A_ ( snake_case ):
for folder, directories, fnames in os.walk(snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(snake_case , snake_case ) , snake_case , pattern="examples" )
def A_ ( snake_case , snake_case=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case , snake_case , snake_case )
if not patch:
update_version_in_examples(snake_case )
def A_ ( ):
SCREAMING_SNAKE_CASE:int = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE:int = "1. Want to contribute a new model?"
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[Any] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE:Dict = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE:str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE:Optional[Any] = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(snake_case )
def A_ ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE:str = f.read()
SCREAMING_SNAKE_CASE:Tuple = REPLACE_PATTERNS["init"][0].search(snake_case ).groups()[0]
return packaging.version.parse(snake_case )
def A_ ( snake_case=False ):
SCREAMING_SNAKE_CASE:Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE:Any = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE:str = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
SCREAMING_SNAKE_CASE:str = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE:Optional[int] = input(F'''Which version are you releasing? [{default_version}]''' )
if len(snake_case ) == 0:
SCREAMING_SNAKE_CASE:Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(snake_case , patch=snake_case )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def A_ ( ):
SCREAMING_SNAKE_CASE:int = get_version()
SCREAMING_SNAKE_CASE:int = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
SCREAMING_SNAKE_CASE:Optional[Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE:Any = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(snake_case ) == 0:
SCREAMING_SNAKE_CASE:Union[str, Any] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(snake_case )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 143 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCamelCase = AltDiffusionPipeline
__lowerCamelCase = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__: str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCamelCase__: Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
lowerCamelCase__: List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCamelCase__: Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
lowerCamelCase__: List[Any] = CLIPTextModel(lowerCamelCase__ )
lowerCamelCase__: List[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCamelCase__: Tuple = 77
lowerCamelCase__: Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Tuple , __a : Optional[int] , __a : Tuple=0 ):
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
lowerCamelCase__: Optional[int] = torch.manual_seed(lowerCamelCase__ )
else:
lowerCamelCase__: Dict = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowerCamelCase__: Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Optional[int] = self.get_dummy_components()
torch.manual_seed(0 )
lowerCamelCase__: str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase__: int = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
lowerCamelCase__: Any = text_encoder
lowerCamelCase__: Dict = AltDiffusionPipeline(**lowerCamelCase__ )
lowerCamelCase__: Any = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCamelCase__: Dict = self.get_dummy_inputs(lowerCamelCase__ )
lowerCamelCase__: Optional[int] = '''A photo of an astronaut'''
lowerCamelCase__: int = alt_pipe(**lowerCamelCase__ )
lowerCamelCase__: Tuple = output.images
lowerCamelCase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__: List[Any] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Optional[Any] = self.get_dummy_components()
lowerCamelCase__: Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
lowerCamelCase__: Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase__: int = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
lowerCamelCase__: str = text_encoder
lowerCamelCase__: str = AltDiffusionPipeline(**lowerCamelCase__ )
lowerCamelCase__: Optional[Any] = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCamelCase__: List[Any] = self.get_dummy_inputs(lowerCamelCase__ )
lowerCamelCase__: Tuple = alt_pipe(**lowerCamelCase__ )
lowerCamelCase__: int = output.images
lowerCamelCase__: Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__: List[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: int = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=lowerCamelCase__ )
lowerCamelCase__: Union[str, Any] = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCamelCase__: Any = '''A painting of a squirrel eating a burger'''
lowerCamelCase__: Optional[int] = torch.manual_seed(0 )
lowerCamelCase__: Optional[Any] = alt_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
lowerCamelCase__: Union[str, Any] = output.images
lowerCamelCase__: Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__: int = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: Any = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
lowerCamelCase__: Optional[int] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
lowerCamelCase__: Dict = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCamelCase__: Optional[int] = '''A painting of a squirrel eating a burger'''
lowerCamelCase__: Dict = torch.manual_seed(0 )
lowerCamelCase__: List[str] = alt_pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="""numpy""" )
lowerCamelCase__: Optional[int] = output.images
lowerCamelCase__: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__: str = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 718 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( _UpperCamelCase ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCamelCase__: Optional[int] = False
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__: Any = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCamelCase__: Dict = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def __lowerCAmelCase ( _UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def __lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
lowerCamelCase__: List[Any] = datetime.now()
lowerCamelCase__: int = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 242 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase__ : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase__ : str = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase__ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_image_processor()
UpperCAmelCase__ : str = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase__ : int = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
UpperCAmelCase__ : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.get_image_processor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCAmelCase__ : int = self.prepare_image_inputs()
UpperCAmelCase__ : Optional[int] = image_processor(_lowerCamelCase , return_tensors="""np""" )
UpperCAmelCase__ : Tuple = processor(images=_lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.get_image_processor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : List[str] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCAmelCase__ : List[str] = """lower newer"""
UpperCAmelCase__ : str = processor(text=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.get_image_processor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCAmelCase__ : int = """lower newer"""
UpperCAmelCase__ : Dict = self.prepare_image_inputs()
UpperCAmelCase__ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_lowerCamelCase ):
processor()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = self.get_image_processor()
UpperCAmelCase__ : Dict = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ : str = processor.batch_decode(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
UpperCAmelCase__ : str = """lower newer"""
UpperCAmelCase__ : Any = self.prepare_image_inputs()
UpperCAmelCase__ : Union[str, Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 182 |
"""simple docstring"""
import cmath
import math
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> complex:
UpperCAmelCase__ : str = math.radians(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = math.radians(lowerCAmelCase )
# Convert voltage and current to rectangular form
UpperCAmelCase__ : Union[str, Any] = cmath.rect(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = cmath.rect(lowerCAmelCase , lowerCAmelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : List[Any] =logging.get_logger(__name__)
def a__ (__lowercase :str , __lowercase :Union[str, Any]=False ) -> Union[str, Any]:
_A : Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_A : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def a__ (__lowercase :Optional[Any] , __lowercase :Optional[int] , __lowercase :Optional[int]=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
_A : Any = ''''''
else:
_A : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_A : int = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_A : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_A : Any = in_proj_weight[
: config.hidden_size, :
]
_A : Optional[Any] = in_proj_bias[: config.hidden_size]
_A : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_A : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_A : Any = in_proj_weight[
-config.hidden_size :, :
]
_A : List[Any] = in_proj_bias[-config.hidden_size :]
def a__ (__lowercase :Union[str, Any] ) -> Union[str, Any]:
_A : List[str] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ (__lowercase :Union[str, Any] , __lowercase :Union[str, Any] , __lowercase :int ) -> Tuple:
_A : Any = dct.pop(__lowercase )
_A : int = val
def a__ () -> Optional[Any]:
_A : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A : Tuple = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ (__lowercase :List[Any] , __lowercase :Optional[int] , __lowercase :Optional[int]=False ) -> Union[str, Any]:
_A : Optional[Any] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=__lowercase , )
_A : Tuple = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 )
_A : Dict = False
# load original model from timm
_A : Tuple = timm.create_model(__lowercase , pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_A : List[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowercase )
_A : Union[str, Any] = create_rename_keys(__lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase , __lowercase )
_A : Union[str, Any] = '''huggingface/label-files'''
_A : List[str] = '''imagenet-1k-id2label.json'''
_A : List[str] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
_A : int = {int(__lowercase ): v for k, v in idalabel.items()}
_A : Optional[Any] = idalabel
_A : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_A : str = ViTHybridModel(__lowercase ).eval()
else:
_A : Union[str, Any] = ViTHybridForImageClassification(__lowercase ).eval()
model.load_state_dict(__lowercase )
# create image processor
_A : Optional[Any] = create_transform(**resolve_data_config({} , model=__lowercase ) )
_A : Tuple = transform.transforms
_A : List[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_A : List[str] = ViTHybridImageProcessor(
do_resize=__lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_A : List[str] = prepare_img()
_A : Dict = transform(__lowercase ).unsqueeze(0 )
_A : List[Any] = processor(__lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__lowercase , __lowercase )
# verify logits
with torch.no_grad():
_A : Optional[int] = model(__lowercase )
_A : List[str] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
_A : Tuple = timm_model.forward_features(__lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowercase , outputs.pooler_output , atol=1e-3 )
else:
_A : Optional[Any] = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
_UpperCamelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
_UpperCamelCase : int =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 719 |
def a__ (__lowercase :str , __lowercase :str ) -> bool:
_A : Dict = len(__lowercase ) + 1
_A : Optional[int] = len(__lowercase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_A : Optional[Any] = [[0 for i in range(__lowercase )] for j in range(__lowercase )]
# since string of zero length match pattern of zero length
_A : int = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __lowercase ):
_A : List[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __lowercase ):
_A : List[str] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowercase ):
for j in range(1 , __lowercase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_A : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_A : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_A : List[Any] = dp[i - 1][j]
else:
_A : Optional[int] = 0
else:
_A : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase : Union[str, Any] ='aab'
_UpperCamelCase : Any ='c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 332 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase = 256
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["melgan"]
def __init__( self :List[Any] , __A :SpectrogramNotesEncoder , __A :SpectrogramContEncoder , __A :TaFilmDecoder , __A :DDPMScheduler , __A :OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ = 128
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
def _snake_case ( self :str , __A :List[Any] , __A :Optional[int]=(-1.0, 1.0) , __A :Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_range
if clip:
SCREAMING_SNAKE_CASE__ = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self :Dict , __A :Tuple , __A :str=(-1.0, 1.0) , __A :List[str]=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input_range
SCREAMING_SNAKE_CASE__ = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self :Union[str, Any] , __A :Any , __A :List[Any] , __A :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self :Any , __A :int , __A :str , __A :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = noise_time
if not torch.is_tensor(__A ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self :Dict , __A :List[List[int]] , __A :Optional[torch.Generator] = None , __A :int = 100 , __A :bool = True , __A :str = "numpy" , __A :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A :int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
SCREAMING_SNAKE_CASE__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ = ones
SCREAMING_SNAKE_CASE__ = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
SCREAMING_SNAKE_CASE__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
SCREAMING_SNAKE_CASE__ = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ = mel[:1]
SCREAMING_SNAKE_CASE__ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("""Generated segment""" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
SCREAMING_SNAKE_CASE__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__A ) | 6 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "encoder-decoder"
lowerCamelCase_ = True
def __init__( self :Optional[int] , **__A :str ) -> int:
"""simple docstring"""
super().__init__(**__A )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = True
@classmethod
def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output | 6 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
__lowerCAmelCase = {
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
__lowerCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __a ( __UpperCamelCase ):
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = ['input_ids', 'attention_mask']
__lowercase : Dict = NllbTokenizer
__lowercase : List[int] = []
__lowercase : List[int] = []
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
lowercase__: Dict = legacy_behaviour
super().__init__(
vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: Optional[int] = vocab_file
lowercase__: Optional[Any] = False if not self.vocab_file else True
lowercase__: Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowercase__: List[Any] = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase__: Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
lowercase__: Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
lowercase__: List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
lowercase__: Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: int = [self.sep_token_id]
lowercase__: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase__: int = src_lang
lowercase__: Union[str, Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: List[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
lowercase__: Union[str, Any] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
lowercase__: int = src_lang
lowercase__: str = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
lowercase__: Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
lowercase__: int = []
lowercase__: Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__: Tuple = [self.cur_lang_code]
lowercase__: List[Any] = [self.eos_token_id]
lowercase__: Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase__: Any = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase__: int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
lowercase__: List[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
lowercase__: Optional[Any] = []
lowercase__: Any = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__: Union[str, Any] = [self.cur_lang_code]
lowercase__: Tuple = [self.eos_token_id]
lowercase__: Dict = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase__: Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase__: str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowercase__: Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 335 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class __a ( __UpperCamelCase ):
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Dict = ['input_ids', 'attention_mask']
__lowercase : Any = GPTaTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: Optional[Any] = kwargs.pop('add_bos_token' , lowerCAmelCase__ )
lowercase__: Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
lowercase__: Optional[int] = getattr(lowerCAmelCase__ , pre_tok_state.pop('type' ) )
lowercase__: Union[str, Any] = add_prefix_space
lowercase__: Tuple = pre_tok_class(**lowerCAmelCase__ )
lowercase__: Optional[int] = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowercase__: List[str] = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowercase__: Union[str, Any] = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
lowercase__: Dict = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[int]:
'''simple docstring'''
lowercase__: List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
if len(lowerCAmelCase__ ) > self.model_max_length:
lowercase__: Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
| 335 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = {}
def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ):
A__ = super().add_tokens(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
" `placeholder_token` that is not already in the tokenizer." )
def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ , UpperCAmelCase__=1 , **UpperCAmelCase__ ):
A__ = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
output.append(UpperCAmelCase__ )
else:
A__ = []
for i in range(UpperCAmelCase__ ):
A__ = placeholder_token + F"""_{i}"""
self.try_adding_tokens(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
output.append(UpperCAmelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
A__ = output
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=1.0 ):
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = []
for i in range(len(UpperCAmelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A__ = self.token_map[placeholder_token]
A__ = tokens[: 1 + int(len(UpperCAmelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
A__ = copy.copy(UpperCAmelCase__ )
random.shuffle(UpperCAmelCase__ )
A__ = text.replace(UpperCAmelCase__ , " ".join(UpperCAmelCase__ ) )
return text
def __call__( self , UpperCAmelCase__ , *UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=1.0 , **UpperCAmelCase__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase__ , vector_shuffle=UpperCAmelCase__ , prop_tokens_to_load=UpperCAmelCase__ ) , *UpperCAmelCase__ , **UpperCAmelCase__ , )
def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=1.0 , **UpperCAmelCase__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase__ , vector_shuffle=UpperCAmelCase__ , prop_tokens_to_load=UpperCAmelCase__ ) , *UpperCAmelCase__ , **UpperCAmelCase__ , )
| 491 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=768 ):
super().__init__(UpperCAmelCase__ )
A__ = proj_size
A__ = CLIPVisionModel(UpperCAmelCase__ )
A__ = PaintByExampleMapper(UpperCAmelCase__ )
A__ = nn.LayerNorm(config.hidden_size )
A__ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
A__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=False ):
A__ = self.model(pixel_values=UpperCAmelCase__ )
A__ = clip_output.pooler_output
A__ = self.mapper(latent_states[:, None] )
A__ = self.final_layer_norm(UpperCAmelCase__ )
A__ = self.proj_out(UpperCAmelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase__ ):
super().__init__()
A__ = (config.num_hidden_layers + 1) // 5
A__ = config.hidden_size
A__ = 1
A__ = nn.ModuleList(
[
BasicTransformerBlock(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , activation_fn="gelu" , attention_bias=UpperCAmelCase__ )
for _ in range(UpperCAmelCase__ )
] )
def __A ( self , UpperCAmelCase__ ):
for block in self.blocks:
A__ = block(UpperCAmelCase__ )
return hidden_states
| 491 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCamelCase : Tuple = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
lowerCamelCase : Tuple = 1_0
lowerCamelCase : str = 2_5_6
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
if len(UpperCamelCase__ ) < MIN_NUM_TOKENS:
return None
_SCREAMING_SNAKE_CASE = MinHash(num_perm=UpperCamelCase__ )
for token in set(UpperCamelCase__ ):
min_hash.update(token.encode() )
return min_hash
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(UpperCamelCase__ ) if len(t.strip() ) > 0}
class __snake_case:
def __init__( self , *,
A_ = 0.85 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = duplication_jaccard_threshold
_SCREAMING_SNAKE_CASE = NUM_PERM
_SCREAMING_SNAKE_CASE = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_SCREAMING_SNAKE_CASE = defaultdict(A_ )
def A ( self , A_ , A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._index.query(A_ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(A_ , A_ )
if len(A_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for base, duplicates in self._duplicate_clusters.items():
_SCREAMING_SNAKE_CASE = [base] + list(A_ )
# reformat the cluster to be a list of dict
_SCREAMING_SNAKE_CASE = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A_ )
return duplicate_clusters
def A ( self , A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_duplicate_clusters()
with open(A_ , '''w''' ) as f:
json.dump(A_ , A_ )
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = element
_SCREAMING_SNAKE_CASE = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(UpperCamelCase__ , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def A__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = DuplicationIndex(duplication_jaccard_threshold=UpperCamelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(UpperCamelCase__ ) ) , max_queue_size=100 ) ):
di.add(UpperCamelCase__ , UpperCamelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def A__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_tokens(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = get_tokens(UpperCamelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCamelCase : List[Any] = None
def A__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for elementa in cluster:
_SCREAMING_SNAKE_CASE = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_SCREAMING_SNAKE_CASE = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(UpperCamelCase__ , UpperCamelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_SCREAMING_SNAKE_CASE = 1
extremes.append(UpperCamelCase__ )
return extremes
def A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
global _shared_dataset
_SCREAMING_SNAKE_CASE = dataset
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = partial(_find_cluster_extremes_shared , jaccard_threshold=UpperCamelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
UpperCamelCase__ , UpperCamelCase__ , ) , total=len(UpperCamelCase__ ) , ):
extremes_list.append(UpperCamelCase__ )
return extremes_list
def A__ ( UpperCamelCase__ , UpperCamelCase__ = 0.85 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = make_duplicate_clusters(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = find_extremes(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for extremes in extremes_clusters:
for element in extremes:
_SCREAMING_SNAKE_CASE = element
_SCREAMING_SNAKE_CASE = duplicate_indices - set(extreme_dict.keys() )
_SCREAMING_SNAKE_CASE = dataset.filter(lambda UpperCamelCase__ , UpperCamelCase__ : idx not in remove_indices , with_indices=UpperCamelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_SCREAMING_SNAKE_CASE = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_SCREAMING_SNAKE_CASE = extreme_dict[element['''base_index''']]['''copies''']
print(F'''Original dataset size: {len(UpperCamelCase__ )}''' )
print(F'''Number of duplicate clusters: {len(UpperCamelCase__ )}''' )
print(F'''Files in duplicate cluster: {len(UpperCamelCase__ )}''' )
print(F'''Unique files in duplicate cluster: {len(UpperCamelCase__ )}''' )
print(F'''Filtered dataset size: {len(UpperCamelCase__ )}''' )
return ds_filter, duplicate_clusters
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Tuple = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["""DeiTFeatureExtractor"""]
lowerCamelCase : int = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 168 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
SCREAMING_SNAKE_CASE = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
SCREAMING_SNAKE_CASE = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
SCREAMING_SNAKE_CASE = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : int ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def A__ ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : str=1 , UpperCAmelCase : List[Any]="binary" , UpperCAmelCase : str=None ) -> Optional[int]:
'''simple docstring'''
lowercase : Union[str, Any] =fa_score(
UpperCAmelCase , UpperCAmelCase , labels=UpperCAmelCase , pos_label=UpperCAmelCase , average=UpperCAmelCase , sample_weight=UpperCAmelCase )
return {"f1": float(UpperCAmelCase ) if score.size == 1 else score}
| 94 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SCREAMING_SNAKE_CASE = '▁'
# Segments (not really needed)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 4
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = '''left'''
UpperCamelCase_ = XLNetTokenizer
def __init__( self : int , UpperCAmelCase : Dict=None , UpperCAmelCase : str=None , UpperCAmelCase : str=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Optional[Any]="<sep>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Optional[Any]="<cls>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : int=["<eop>", "<eod>"] , **UpperCAmelCase : List[Any] , ) -> List[str]:
'''simple docstring'''
lowercase : Dict =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Tuple =3
lowercase : Union[str, Any] =do_lower_case
lowercase : Any =remove_space
lowercase : int =keep_accents
lowercase : int =vocab_file
lowercase : Union[str, Any] =False if not self.vocab_file else True
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Union[str, Any] =[self.sep_token_id]
lowercase : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[int] =[self.sep_token_id]
lowercase : Union[str, Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 94 | 1 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :str = "M-CLIP"
def __init__( self : List[Any] , snake_case__ : Tuple=1024 , snake_case__ : Any=768 , **snake_case__ : List[Any] ):
lowerCamelCase_ : str =transformerDimSize
lowerCamelCase_ : List[Any] =imageDimSize
super().__init__(**snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :int = MCLIPConfig
def __init__( self : List[Any] , snake_case__ : Optional[Any] , *snake_case__ : Any , **snake_case__ : int ):
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
lowerCamelCase_ : Optional[Any] =XLMRobertaModel(snake_case__ )
lowerCamelCase_ : Optional[Any] =torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str ):
lowerCamelCase_ : List[str] =self.transformer(input_ids=snake_case__ , attention_mask=snake_case__ )[0]
lowerCamelCase_ : Dict =(embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(snake_case__ ), embs
| 244 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
A__ : List[str] = logging.get_logger(__name__)
A__ : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : List[str] = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
A__ : List[Any] = {'mobilebert-uncased': 512}
A__ : List[Any] = {}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Dict = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :int = MobileBertTokenizer
def __init__( self : Tuple , snake_case__ : Any=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]="[UNK]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : int="[CLS]" , snake_case__ : int="[MASK]" , snake_case__ : Optional[Any]=True , snake_case__ : int=None , **snake_case__ : List[Any] , ):
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
lowerCamelCase_ : Optional[int] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
):
lowerCamelCase_ : str =getattr(snake_case__ , normalizer_state.pop("type" ) )
lowerCamelCase_ : Union[str, Any] =do_lower_case
lowerCamelCase_ : List[Any] =strip_accents
lowerCamelCase_ : List[Any] =tokenize_chinese_chars
lowerCamelCase_ : Optional[Any] =normalizer_class(**snake_case__ )
lowerCamelCase_ : int =do_lower_case
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ):
lowerCamelCase_ : Optional[int] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCamelCase_ : Optional[Any] =[self.sep_token_id]
lowerCamelCase_ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
lowerCamelCase_ : Optional[Any] =self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 244 | 1 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCamelCase = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_UpperCamelCase = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_UpperCamelCase = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def lowerCAmelCase__( lowercase : Tuple , lowercase : Dict ) -> int:
return float((preds == labels).mean() )
def lowerCAmelCase__( lowercase : str , lowercase : Dict , lowercase : Tuple="binary" ) -> Dict:
__snake_case : str = simple_accuracy(lowercase , lowercase )
__snake_case : Dict = float(fa_score(y_true=lowercase , y_pred=lowercase , average=lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase__( lowercase : Dict , lowercase : Tuple ) -> List[str]:
__snake_case : Optional[int] = {}
for id_pred, label in zip(lowercase , lowercase ):
__snake_case : List[Any] = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
__snake_case : Dict = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__snake_case : Tuple = [(pred, label)]
__snake_case , __snake_case : Any = [], []
for question, preds_labels in question_map.items():
__snake_case , __snake_case : List[Any] = zip(*lowercase )
__snake_case : int = fa_score(y_true=lowercase , y_pred=lowercase , average="macro" )
fas.append(lowercase )
__snake_case : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase ) )
ems.append(lowercase )
__snake_case : Optional[Any] = float(sum(lowercase ) / len(lowercase ) )
__snake_case : List[str] = sum(lowercase ) / len(lowercase )
__snake_case : Optional[int] = float(fa_score(y_true=lowercase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase , UpperCAmelCase , fa_avg="macro" )
elif self.config_name == "record":
__snake_case : Dict = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
__snake_case : Tuple = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(UpperCAmelCase , UpperCAmelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase , UpperCAmelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 243 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 528 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ = "Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE__ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = BertAbsConfig(
temp_dir="." , finetune_bert=__A , large=__A , share_emb=__A , use_bert_emb=__A , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowercase_ = torch.load(__A , lambda __lowerCamelCase , __lowerCamelCase : storage )
lowercase_ = AbsSummarizer(__A , torch.device("cpu" ) , __A )
original.eval()
lowercase_ = BertAbsSummarizer(__A , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
lowercase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
lowercase_ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
lowercase_ = torch.tensor(__A ).unsqueeze(0 )
lowercase_ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
lowercase_ = torch.tensor(__A ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowercase_ = encoder_input_ids
lowercase_ = decoder_input_ids
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowercase_ = original(__A , __A , __A , __A , __A , __A , __A )[0]
lowercase_ = original.generator(__A )
lowercase_ = new_model(
__A , __A , __A , __A , __A )[0]
lowercase_ = new_model.generator(__A )
lowercase_ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(__A ) )
lowercase_ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(__A ) )
lowercase_ = torch.allclose(__A , __A , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 715 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class __lowerCamelCase ( enum.Enum ):
"""simple docstring"""
lowerCAmelCase__ = "all_checks"
lowerCAmelCase__ = "basic_checks"
lowerCAmelCase__ = "no_checks"
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[dict] , __lowerCamelCase: dict , __lowerCamelCase: Optional[int]=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) )
if len(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) )
lowercase_ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
lowercase_ = " for " + verification_name if verification_name is not None else ""
if len(__lowerCamelCase ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[dict] , __lowerCamelCase: dict ):
'''simple docstring'''
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) )
if len(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) > 0:
raise UnexpectedSplits(str(set(__lowerCamelCase ) - set(__lowerCamelCase ) ) )
lowercase_ = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__lowerCamelCase ) > 0:
raise NonMatchingSplitsSizesError(str(__lowerCamelCase ) )
logger.info("All the splits matched successfully." )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str , __lowerCamelCase: bool = True ):
'''simple docstring'''
if record_checksum:
lowercase_ = shaaaa()
with open(__lowerCamelCase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(__lowerCamelCase )
lowercase_ = m.hexdigest()
else:
lowercase_ = None
return {"num_bytes": os.path.getsize(__lowerCamelCase ), "checksum": checksum}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 601 | 0 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.