"""Convert TAPAS checkpoint."""

import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model from a JSON config
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files; the vocab file sits next to the TF checkpoint
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
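
# Illustrative usage (editor's sketch, not part of the original script). With
# hypothetical paths in place of real checkpoint files, the converter can also
# be driven programmatically:
#
#   convert_tf_checkpoint_to_pytorch(
#       task="WTQ",
#       reset_position_index_per_cell=True,
#       tf_checkpoint_path="/path/to/model.ckpt",        # hypothetical
#       tapas_config_file="/path/to/tapas_config.json",  # hypothetical
#       pytorch_dump_path="/path/to/pytorch_dump",       # hypothetical
#   )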
"""Convert Wav2Vec2 checkpoint."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter (sub)names to their transformers counterparts.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
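
# Illustrative usage (editor's sketch, not part of the original script).
# Converting a hypothetical fine-tuned fairseq CTC checkpoint programmatically;
# all paths are placeholders and the dump folder must already exist:
#
#   convert_wav2vec2_checkpoint(
#       checkpoint_path="/path/to/wav2vec2_ft.pt",   # hypothetical
#       pytorch_dump_folder_path="/path/to/dump",    # hypothetical
#       dict_path="/path/to/dict.ltr.txt",           # hypothetical
#       is_finetuned=True,
#   )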
"""Testing suite for the TVLT feature extraction."""

import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
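
# Editor's sketch (not part of the original test file): the extractor can be
# exercised outside the test harness on synthetic audio. The shape comment
# follows the assertions above; the exact time dimension depends on input length.
#
#   import numpy as np
#   from transformers import TvltFeatureExtractor
#
#   extractor = TvltFeatureExtractor()
#   audio = np.random.randn(44100)  # one second of synthetic audio at 44.1 kHz
#   features = extractor(audio, return_tensors="np", sampling_rate=44100)
#   print(features.audio_values.shape)  # (batch, num_channels, time, feature_size)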
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is to the target by just
    counting each char in the right position.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
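
# Editor's sketch (not part of the original module): evaluate() is
# deterministic and can be sanity-checked directly, while crossover() is
# random, so only structural properties of its output are stable:
#
#   assert evaluate("abcd", "abcf") == ("abcd", 3.0)  # 3 characters line up
#   child_1, child_2 = crossover("aaaa", "bbbb")      # e.g. ("aabb", "bbaa")
#   assert len(child_1) == len(child_2) == 4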
"""Pre-training a ViT MAE model (masked autoencoder) on an image dataset."""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
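
# Editor's sketch (not part of the original example): collate_fn simply stacks
# per-example pixel tensors into a batch, which can be checked in isolation:
#
#   import torch
#   batch = collate_fn([{"pixel_values": torch.zeros(3, 224, 224)} for _ in range(4)])
#   assert batch["pixel_values"].shape == (4, 3, 224, 224)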
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
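
# Editor's sketch (not part of the original module): this builder backs
# `Dataset.from_spark`. A minimal local round-trip, assuming pyspark is
# installed and a local Spark session is acceptable:
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label int")
#   ds = Dataset.from_spark(df)  # materializes the DataFrame via the Spark builder above
#   print(ds[0])                 # first row, e.g. {'text': 'hello', 'label': 0}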
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
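
# Editor's note (not part of the original __init__): because sys.modules is
# swapped for a _LazyModule, heavy submodules are imported only on first
# attribute access, e.g.:
#
#   import transformers
#   config = transformers.CTRLConfig()  # triggers the lazy import of configuration_ctrl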
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a :Union[str, Any] = logging.get_logger(__name__)
a :int = {"vocab_file": "sentencepiece.bpe.model"}
a :Optional[int] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
a :Optional[Any] = {
"camembert-base": 512,
}
a :Tuple = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
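# Usage sketch — a minimal round trip, assuming Hub access so that the
# "camembert-base" SentencePiece model referenced in the maps above can be fetched:
if __name__ == "__main__":
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    encoding = tokenizer("J'aime le camembert !")
    print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))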
| 680 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 680 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual(a * b, 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count('0'), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))
    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5)))
if __name__ == "__main__":
unittest.main()
| 708 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs, ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, 'v2'), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'], speaker_embeddings_directory, f"""{prompt_key}_{key}""" ), voice_preset[key], allow_pickle=False, )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"""{prompt_key}_{key}.npy""" )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), 'w') as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key], subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""" )
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""" )
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('.npz'):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding='max_length', max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
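# Usage sketch — assumes Hub access; the "suno/bark-small" checkpoint and the
# "v2/en_speaker_6" preset name are illustrative and must exist for this to run:
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'history_prompt']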
| 319 | 0 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope) | 174 |
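# Usage sketch — parse a serialized SentencePiece model with the generated message
# classes (the "sentencepiece.bpe.model" path is illustrative):
if __name__ == "__main__":
    model = ModelProto()
    with open("sentencepiece.bpe.model", "rb") as f:
        model.ParseFromString(f.read())
    print(len(model.pieces), model.trainer_spec.vocab_size)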
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type') )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type') )
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token( self, value ):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self, conversation: "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
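# Usage sketch — assumes Hub access to "facebook/blenderbot-3B" (the checkpoint the
# vocab maps above point to); note the leading space Blenderbot expects on user turns:
if __name__ == "__main__":
    tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    print(tok(" Hello, how are you?")["input_ids"])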
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True )
        self.proj_in = nn.Linear(in_channels, inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels )
    def forward( self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width )
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4 )
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, num_frames, channel )
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel )
            .permute(0, 3, 4, 1, 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, '''rb''' ) as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path) )
        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path) ).startswith(base)
        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name) ) )
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True )
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x1F\x8B']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, '''rb''' ) as gzip_file:
            with open(output_path, '''wb''' ) as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, '''rb''' ) as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True )
        with zipfile.ZipFile(input_path, '''r''' ) as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID  # RAR5_ID
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''' )
        import rarfile
        os.makedirs(output_path, exist_ok=True )
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''' )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, '''rb''' ) as ifh, open(output_path, '''wb''' ) as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x42\x5A\x68']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, '''rb''' ) as compressed_file:
            with open(output_path, '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(output_path, exist_ok=True )
        with py7zr.SevenZipFile(input_path, '''r''' ) as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x04\x22\x4D\x18']
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(input_path, '''rb''' ) as compressed_file:
            with open(output_path, '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''', category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated", ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('''.lock''' ) )
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''', category=FutureWarning, )
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''', category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
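# Usage sketch — format inference sniffs magic numbers, so the file must exist;
# "archive.tar.gz" and "extracted_dir" are illustrative paths:
if __name__ == "__main__":
    archive_format = Extractor.infer_extractor_format("archive.tar.gz")
    if archive_format:
        Extractor.extract("archive.tar.gz", "extracted_dir", extractor_format=archive_format)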
| 11 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("""\n""".join(str(row) for row in solutions))
    else:
        print("""No solution exists!""")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
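    # Tiny worked example: 0 is a free cell, 1 is a wall; a path hugs the left/bottom.
    demo_maze = [
        [0, 1, 1],
        [0, 0, 1],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the 0/1 solution matrix row by row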
| 548 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
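# Minimal round-trip sketch for numpy_to_pil (needs numpy; values are expected in [0, 1]):
if __name__ == "__main__":
    import numpy as np
    batch = np.random.rand(2, 8, 8, 3)
    pils = numpy_to_pil(batch)
    print(len(pils), pils[0].size, pils[0].mode)  # 2 (8, 8) RGB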
| 467 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
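    # Analytic check: the integrand x^3 + x^2 = x^2 (x + 1) is negative on [-5, -1]
    # and the abs() above makes the sum an unsigned area, so the loop converges to
    # the integral of |x^3 + x^2| over [-5, 5] = 344/3 + 198 = 938/3 ≈ 312.67.
    print(f'''exact area: {938 / 3}''')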
| 467 | 1 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
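    # Note: both functions recurse once per element, so inputs longer than the
    # interpreter's recursion limit (about 1000 by default) raise RecursionError.
    # Quick self-check on a small fixed list:
    demo = [5, 3, 1, 4, 2]
    rec_insertion_sort(demo, len(demo))
    assert demo == [1, 2, 3, 4, 5]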
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 65 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f'''s3://{mock_bucket}'''
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith('''s3://''' ) is False
    dataset_path = '''./local/path'''
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''', COMPRESSION_FILESYSTEMS )
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bz2_file, '''lz4''': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path )
    assert isinstance(fs, compression_fs_class )
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename, '''r''', encoding='''utf-8''' ) as f, open(text_file, encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''', ['''zip''', '''gzip'''] )
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file) as f:
        assert hffs.open('''data/text_data.txt''', '''r''' ).read() == f.read()
def test_fs_overwrites():
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 152 |
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ):
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=SCREAMING_SNAKE_CASE , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=SCREAMING_SNAKE_CASE , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=SCREAMING_SNAKE_CASE , help='''where to store parsed gold_data_path file''' , )
A_ : Optional[int] = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
A_ : Tuple = json.load(SCREAMING_SNAKE_CASE )
for dpr_record in tqdm(SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = dpr_record['''question''']
A_ : Union[str, Any] = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(SCREAMING_SNAKE_CASE ) + '''\n''' )
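# A hedged sketch of the record layout the loop above assumes: each entry of the
# source JSON carries a "question" string and a list of "positive_ctxs" dicts
# with a "title". The sample record below is made up for illustration.
def _example_dpr_record_lines():
    record = {
        "question": "who wrote hamlet",
        "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}],
    }
    eval_line = record["question"] + "\n"  # one question per line in the evaluation set
    gold_line = "\t".join(c["title"] for c in record["positive_ctxs"]) + "\n"  # tab-joined titles
    assert eval_line == "who wrote hamlet\n"
    assert gold_line == "Hamlet\tWilliam Shakespeare\n"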
if __name__ == "__main__":
main()
| 152 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def snake_case__ ( lowercase ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def snake_case__ ( lowercase ):
class _lowercase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
lowerCAmelCase_: Optional[int] = metric_id
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE: List[Any] = [MetricMock(_A ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _a ( self ):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def snake_case__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
if "tmp_path" in args:
lowerCAmelCase_: Any = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
func(*A_ )
| 613 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def lowerCamelCase__ ( A_ , A_ ):
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(A_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
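# The vocab parser above treats each line as a comma-separated group of surface
# forms that share one id; a line that is exactly "," or contains no comma maps a
# single token. A small sketch with a made-up three-line vocabulary (note the
# bare "," line carries no trailing newline, matching the `t == ","` branch):
def _example_vocab_line_parsing():
    lines = ["こん,コン\n", "ねこ\n", ","]
    parsed = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in lines]
    assert parsed == [["こん", "コン"], ["ねこ"], [","]]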
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|startoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "".join(UpperCamelCase__ ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(UpperCamelCase__ ) + "\n" )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class lowercase_ ( _A ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = vocab # same as swe
UpperCAmelCase_ = ids_to_tokens # same as bpe
UpperCAmelCase_ = emoji
UpperCAmelCase_ = np.max([len(UpperCamelCase__ ) for w in self.vocab.keys()] )
UpperCAmelCase_ = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase_ = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase_ = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase_ = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = self.content_repattera.sub("<URL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<EMAIL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<TEL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<PRICE>" , UpperCamelCase__ )
UpperCAmelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
UpperCAmelCase_ = self.clean_text(UpperCamelCase__ )
def check_symbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE28080 and c <= 0xE2B07F:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_symbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
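# Characters that never match a vocab entry fall back to per-byte tokens of the
# form "<|byte%d|>", one token per UTF-8 byte, so any input stays encodable and
# decoding can rebuild the exact bytes. A self-contained sketch of that round
# trip (the input string is arbitrary):
def _example_byte_fallback():
    text = "é"  # two UTF-8 bytes: 0xC3 0xA9
    tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
    assert tokens == ["<|byte195|>", "<|byte169|>"]
    # the decode path above recovers the ints via word[6:-2] and joins the bytes:
    assert bytearray(int(t[6:-2]) for t in tokens).decode("utf-8") == "é"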
| 660 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
A_ : Union[str, Any] = KandinskyInpaintPipeline
A_ : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
A_ : Optional[int] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
A_ : Optional[int] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A_ : Union[str, Any] = False
@property
def _A ( self : str ):
'''simple docstring'''
return 32
@property
def _A ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _A ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def _A ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _A ( self : List[Any] ):
'''simple docstring'''
return 100
@property
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _A ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCAmelCase__ : Union[str, Any] = MultilingualCLIP(_lowerCAmelCase )
lowerCAmelCase__ : str = text_encoder.eval()
return text_encoder
@property
def _A ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ : int = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def _A ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = self.dummy_tokenizer
lowerCAmelCase__ : Union[str, Any] = self.dummy_unet
lowerCAmelCase__ : int = self.dummy_movq
lowerCAmelCase__ : List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , )
lowerCAmelCase__ : Tuple = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _A ( self : List[str] , a__ : Optional[int] , a__ : str=0 ):
'''simple docstring'''
lowerCAmelCase__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCAmelCase__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
# create init_image
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Dict = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
# create mask
lowerCAmelCase__ : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase__ : Optional[int] = 0
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ : Any = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCAmelCase__ : str = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = "cpu"
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Any = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ : Tuple = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ : Dict = output.images
lowerCAmelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _A ( self : Any ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
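# A hedged sketch of the two-stage flow the slow integration test below
# exercises: the prior pipeline maps the prompt to CLIP image embeddings, and
# the inpaint pipeline conditions on those embeddings plus an init image and a
# mask. Checkpoint names match the test; calling this downloads the public
# weights, and the exact mask convention (which value marks the repainted
# region) follows the pipeline's own documentation.
def _example_kandinsky_inpaint_two_stage(init_image, mask):
    prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
    image_emb, negative_emb = prior("a hat", num_inference_steps=5, negative_prompt="").to_tuple()
    pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
    return pipe(
        "a hat",
        image=init_image,
        mask_image=mask,
        image_embeds=image_emb,
        negative_image_embeds=negative_emb,
        height=768,
        width=768,
        output_type="np",
    ).images[0]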
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def _A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
lowerCAmelCase__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase__ : str = np.ones((768, 768) , dtype=np.floataa )
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : Tuple = "a hat"
lowerCAmelCase__ : int = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowerCAmelCase__ : Dict = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
lowerCAmelCase__ : List[str] = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase__ : Union[str, Any] = pipeline(
_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
lowerCAmelCase__ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 709 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""google/rembert""": 2_56,
}
snake_case = """▁"""
class lowerCAmelCase ( UpperCamelCase_ ):
A_ : Dict = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = RemBertTokenizer
def __init__( self : Dict , a__ : int=None , a__ : List[Any]=None , a__ : List[Any]=True , a__ : Dict=True , a__ : int=False , a__ : Tuple="[CLS]" , a__ : Optional[int]="[SEP]" , a__ : Optional[Any]="<unk>" , a__ : List[str]="[SEP]" , a__ : Any="<pad>" , a__ : List[str]="[CLS]" , a__ : int="[MASK]" , **a__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : List[str] = remove_space
lowerCAmelCase__ : Optional[int] = keep_accents
lowerCAmelCase__ : Tuple = vocab_file
lowerCAmelCase__ : Optional[int] = False if not self.vocab_file else True
def _A ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _A ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1]
def _A ( self : Tuple , a__ : List[int] , a__ : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self : List[str] , a__ : str , a__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error("Vocabulary path ({}) should be a directory".format(a__ ) )
return
lowerCAmelCase__ : List[Any] = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
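# A small sketch of the special-token layout the methods above implement, using
# illustrative ids (cls=101, sep=102 are assumptions for readability, not
# RemBERT's real vocabulary ids):
def _example_rembert_pair_layout():
    cls_id, sep_id = 101, 102
    a, b = [7, 8], [9]
    pair = [cls_id] + a + [sep_id] + b + [sep_id]        # build_inputs_with_special_tokens
    type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)   # create_token_type_ids_from_sequences
    assert pair == [101, 7, 8, 102, 9, 102]
    assert type_ids == [0, 0, 0, 0, 1, 1]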
| 568 | 0 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _a :
'''simple docstring'''
def __init__( self, A, A=100, A=13, A=30, A=2, A=3, A=True, A=True, A=32, A=4, A=4, A=37, A="gelu", A=0.1, A=0.1, A=10, A=0.02, A=3, A=None, A=[0, 1, 2, 3], ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : List[Any] = 100
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = out_indices
SCREAMING_SNAKE_CASE : Tuple = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=A, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BeitModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BeitForMaskedImageModeling(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : str = BeitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = BeitForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE : Dict = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
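# The sequence length used throughout the tester follows ViT-style patching:
# (image_size // patch_size) ** 2 patches plus one [CLS] token. With the tester
# defaults above (image_size=30, patch_size=2) that is 15 ** 2 + 1 = 226.
def _example_beit_seq_length(image_size=30, patch_size=2):
    return (image_size // patch_size) ** 2 + 1


assert _example_beit_seq_length() == 226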
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A : Tuple = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[Any] = False
A : Any = False
A : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BeitModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A, nn.Linear ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(A )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : int = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE : List[Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : Dict = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = _config_zero_init(A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(config=A )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : str = BeitModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(pixel_values=A, bool_masked_pos=A )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(A )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], A, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3], A, atol=1E-4 ) )
SCREAMING_SNAKE_CASE : int = 281
self.assertEqual(logits.argmax(-1 ).item(), A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**A )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3], A, atol=1E-4 ) )
SCREAMING_SNAKE_CASE : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item(), A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE : Tuple = model.to(A )
SCREAMING_SNAKE_CASE : int = BeitImageProcessor(do_resize=A, size=640, do_center_crop=A )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
SCREAMING_SNAKE_CASE : str = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : str = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : str = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
], device=A, )
else:
SCREAMING_SNAKE_CASE : int = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE : Tuple = model.to(A )
SCREAMING_SNAKE_CASE : Tuple = BeitImageProcessor(do_resize=A, size=640, do_center_crop=A )
SCREAMING_SNAKE_CASE : Any = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
SCREAMING_SNAKE_CASE : Optional[int] = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, A )
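# Shape notes for the integration tests above: the 196 in bool_masked_pos is the
# patch count of a 224x224 input with 16x16 patches, (224 // 16) ** 2 = 14 ** 2,
# and the 8_192 logit dimension is the size of the visual-token codebook BEiT
# predicts over during masked image modeling.
assert (224 // 16) ** 2 == 196  # patch grid behind the (1, 196, 8_192) logits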
| 28 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int | float | str , SCREAMING_SNAKE_CASE : int | float | str ):
if nth_term == "":
return [""]
UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
UpperCAmelCase = []
for temp in range(int(SCREAMING_SNAKE_CASE ) ):
series.append(f'''1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE ) )}''' if series else '1' )
return series
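# Worked example of the function above, as it is invoked at the bottom of this
# file via p_series(nth_term, power): for nth_term=5 and power=2 it returns
#   ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]
# since the first term is emitted as the bare string "1" by the `if series` branch.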
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : List[Any] = int(input('Enter the last number (nth term) of the P-Series'))
_a : Tuple = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 447 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : str = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
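# _LazyModule defers the heavy torch-backed imports declared above until an
# attribute is first touched, so importing the package itself stays cheap.
# Illustrative usage (the attribute access, not the package import, triggers
# the real load):
#
#   from transformers.models.autoformer import AutoformerConfig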
| 705 |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
return abs(_lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a, _lowerCamelCase )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
__lowercase ,__lowercase : Any = y, x % y
return abs(_lowerCamelCase )
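# Worked trace of the iterative Euclid loop above for gcd_by_iterative(48, 18):
#   (x, y) = (48, 18) -> (18, 12) -> (12, 6) -> (6, 0)
# y reaches 0, so the loop exits and abs(6) = 6 is returned.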
def snake_case__ ( ) ->Optional[int]:
"""simple docstring"""
try:
__lowercase : Optional[int] = input("Enter two integers separated by comma (,): " ).split("," )
__lowercase : Optional[Any] = int(nums[0] )
__lowercase : str = int(nums[1] )
print(
F'greatest_common_divisor({num_a}, {num_a}) = '
F'{greatest_common_divisor(_lowerCamelCase, _lowerCamelCase )}' )
print(F'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_lowerCamelCase, _lowerCamelCase )}' )
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
| 281 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : List[str]=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple="relu" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Optional[int]=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase ( self : Tuple ) -> List[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> str:
__lowerCAmelCase = FlaxRegNetModel(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Tuple:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
a_ = False
a_ = False
a_ = False
def lowercase ( self : Dict ) -> None:
__lowerCAmelCase = FlaxRegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : int ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : str ) -> Union[str, Any]:
return
def lowercase ( self : Dict ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowercase ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowercase ( self : Tuple ) -> Tuple:
pass
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : str ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
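# The pattern above compares a compiled forward pass against the same call under
# jax.disable_jit(), which runs operations eagerly and makes tracer errors easy
# to localize. A tiny self-contained sketch of the same idiom on a toy function:
def _example_jit_vs_eager():
    @jax.jit
    def f(x):
        return (x * 2.0).sum()

    x = jnp.arange(4.0)
    jitted = f(x)  # compiled and cached on first call
    with jax.disable_jit():
        eager = f(x)  # identical computation, executed op by op
    assert float(jitted) == float(eager) == 12.0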
def a_ ( ):
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='np' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 53 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Any = SamImageProcessor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Any = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(snake_case__ , return_tensors="np" )
_SCREAMING_SNAKE_CASE : Any = processor(images=snake_case__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Dict = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : Tuple = [torch.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : int = [[1764, 2646]]
_SCREAMING_SNAKE_CASE : Dict = [[683, 1024]]
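# post_process_masks upscales the low-resolution (5x5) mask logits back to the
# original image sizes; the reshaped input sizes are used to strip the padding
# added during preprocessing before resizing to the original (1764, 2646).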
_SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(snake_case__ , snake_case__ , snake_case__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_SCREAMING_SNAKE_CASE : str = processor.post_process_masks(
snake_case__ , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_SCREAMING_SNAKE_CASE : Any = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : Optional[Any] = processor.post_process_masks(snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_SCREAMING_SNAKE_CASE : Optional[int] = [[1, 0], [0, 1]]
with self.assertRaises(snake_case__ ):
_SCREAMING_SNAKE_CASE : int = processor.post_process_masks(snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) )
@require_vision
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Union[str, Any] = SamImageProcessor()
_SCREAMING_SNAKE_CASE : str = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE : int = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : int = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
_SCREAMING_SNAKE_CASE : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : str = image_processor(snake_case__ , return_tensors="np" )
_SCREAMING_SNAKE_CASE : str = processor(images=snake_case__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : List[str] = [[1764, 2646]]
_SCREAMING_SNAKE_CASE : Tuple = [[683, 1024]]
_SCREAMING_SNAKE_CASE : int = processor.post_process_masks(snake_case__ , snake_case__ , snake_case__ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor.post_process_masks(
snake_case__ , tf.convert_to_tensor(snake_case__ ) , tf.convert_to_tensor(snake_case__ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_SCREAMING_SNAKE_CASE : str = [np.ones((1, 3, 5, 5) )]
_SCREAMING_SNAKE_CASE : List[str] = processor.post_process_masks(
snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_SCREAMING_SNAKE_CASE : int = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_SCREAMING_SNAKE_CASE : str = processor.post_process_masks(
snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : Tuple = SamImageProcessor()
_SCREAMING_SNAKE_CASE : Any = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE : Dict = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[str] = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
_SCREAMING_SNAKE_CASE : List[Any] = [tf.convert_to_tensor(snake_case__ )]
_SCREAMING_SNAKE_CASE : Any = [torch.tensor(snake_case__ )]
_SCREAMING_SNAKE_CASE : Tuple = [[1764, 2646]]
_SCREAMING_SNAKE_CASE : List[Any] = [[683, 1024]]
_SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
snake_case__ , snake_case__ , snake_case__ , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(
snake_case__ , snake_case__ , snake_case__ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=snake_case__ )
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : int = image_processor(snake_case__ , return_tensors="pt" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : str = processor(images=snake_case__ , return_tensors="pt" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(snake_case__ , return_tensors="tf" )["pixel_values"].numpy()
_SCREAMING_SNAKE_CASE : Optional[Any] = processor(images=snake_case__ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
| 295 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCamelCase :
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
A__ = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
A__ = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
A__ = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
A__ = field(default=2 , metadata={"""help""": """Batch size for training."""} )
A__ = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
A__ = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
A__ = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
A__ = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
A__ = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
A__ = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
A__ = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
A__ = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
A__ = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
A__ = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
A__ = field(default=1 , metadata={"""help""": """Training seed."""} )
A__ = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
A__ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class UpperCamelCase :
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
A__ = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
A__ = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
A__ = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
A__ = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
A__ = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class UpperCamelCase :
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
A__ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Number of workers used for code evaluation."""} )
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Sample from the language model's output distribution."""} )
A__ = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
A__ = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
A__ = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
A__ = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
A__ = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
A__ = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
A__ = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
A__ = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
A__ = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
A__ = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class UpperCamelCase :
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
A__ = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
A__ = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
A__ = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
A__ = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
A__ = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
A__ = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
A__ = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
A__ = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
A__ = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
A__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
A__ = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class UpperCamelCase :
A__ = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
A__ = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
A__ = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
A__ = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
A__ = field(
default=32768 , metadata={"""help""": """Vocabulary size of the newly trained tokenizer."""} )
A__ = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
A__ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class UpperCamelCase :
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
A__ = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
A__ = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
A__ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class UpperCamelCase :
A__ = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
A__ = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
A__ = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
A__ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 295 | 1 |
def solution(min_total: int = 10**12) -> int:
    # Walk the Pell-type recurrence that generates successive (blue, total)
    # candidates, stopping once the total disc count exceeds min_total.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
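# Sanity check of the recurrence family (assumption: the smallest arrangement
# with P(two blue) = 1/2 is 15 blue discs out of 21 in total):
assert (15 * 14) / (21 * 20) == 0.5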
| 471 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
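# Worked example: binary_or(25, 32) == "0b111001"
# (25 = 0b011001, 32 = 0b100000; OR-ing the zero-padded strings gives 111001).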
| 471 | 1 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = "EncodecFeatureExtractor"
_UpperCamelCase : Optional[int] = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
_lowerCAmelCase : Optional[Any] = self.feature_extractor
_lowerCAmelCase : Dict = False
def __A ( self , a__=None , a__=None , a__=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a__ , language=a__ , no_timestamps=a__ )
def __call__( self , *a__ , **a__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a__ , **a__ )
_lowerCAmelCase : Any = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""sampling_rate""" , a__ )
_lowerCAmelCase : List[Any] = kwargs.pop("""text""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : List[str] = args[0]
_lowerCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(a__ , **a__ )
if audio is not None:
_lowerCAmelCase : List[str] = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_lowerCAmelCase : Optional[int] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
_lowerCAmelCase : Union[str, Any] = audio_inputs["""padding_mask"""]
return inputs
def __A ( self , *a__ , **a__ ):
_lowerCAmelCase : Optional[int] = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""padding_mask""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : Optional[int] = args[0]
_lowerCAmelCase : str = args[1:]
if audio_values is not None:
return self._decode_audio(a__ , padding_mask=a__ )
else:
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = to_numpy(a__ )
_lowerCAmelCase : List[str] = audio_values.shape
if padding_mask is None:
return list(a__ )
_lowerCAmelCase : Any = to_numpy(a__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_lowerCAmelCase : List[str] = seq_len - padding_mask.shape[-1]
_lowerCAmelCase : Optional[int] = 1 - self.feature_extractor.padding_value
_lowerCAmelCase : List[str] = np.pad(a__ , ((0, 0), (0, difference)) , """constant""" , constant_values=a__ )
_lowerCAmelCase : List[str] = audio_values.tolist()
for i in range(a__ ):
_lowerCAmelCase : Union[str, Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_lowerCAmelCase : Optional[int] = sliced_audio.reshape(a__ , -1 )
return audio_values
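# Minimal usage sketch (assumption: a checkpoint such as "facebook/musicgen-small"
# that pairs an EncodecFeatureExtractor with a T5 tokenizer; commented out since
# it downloads weights):
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
# inputs = processor(text=["80s pop with a driving beat"], padding=True, return_tensors="pt")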
| 702 | """simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
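# Minimal usage sketch (assumption: the same pipeline API exercised above; the
# checkpoint is just one of those used in the tests):
# import numpy as np
# from transformers import pipeline
# classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
# predictions = classifier(np.zeros((16_000,), dtype=np.float32), top_k=2)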
| 663 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowerCamelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
lowerCamelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :str = "whisper"
A :Tuple = ["past_key_values"]
A :int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __UpperCAmelCase=5_1865 , __UpperCAmelCase=80 , __UpperCAmelCase=6 , __UpperCAmelCase=4 , __UpperCAmelCase=6 , __UpperCAmelCase=4 , __UpperCAmelCase=1536 , __UpperCAmelCase=1536 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=5_0257 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="gelu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=False , __UpperCAmelCase=1500 , __UpperCAmelCase=448 , __UpperCAmelCase=5_0256 , __UpperCAmelCase=5_0256 , __UpperCAmelCase=5_0256 , __UpperCAmelCase=None , __UpperCAmelCase=[220, 5_0256] , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=0.0_5 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=7 , **__UpperCAmelCase , ):
"""simple docstring"""
a__ : List[str] = vocab_size
a__ : List[str] = num_mel_bins
a__ : str = d_model
a__ : Union[str, Any] = encoder_layers
a__ : int = encoder_attention_heads
a__ : List[Any] = decoder_layers
a__ : Any = decoder_attention_heads
a__ : Dict = decoder_ffn_dim
a__ : Tuple = encoder_ffn_dim
a__ : Union[str, Any] = dropout
a__ : Optional[int] = attention_dropout
a__ : Union[str, Any] = activation_dropout
a__ : Optional[int] = activation_function
a__ : List[Any] = init_std
a__ : List[str] = encoder_layerdrop
a__ : str = decoder_layerdrop
a__ : List[Any] = use_cache
a__ : Dict = encoder_layers
a__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
a__ : Optional[int] = max_source_positions
a__ : Tuple = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ : int = classifier_proj_size
a__ : Union[str, Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : str = apply_spec_augment
a__ : Dict = mask_time_prob
a__ : List[Any] = mask_time_length
a__ : Union[str, Any] = mask_time_min_masks
a__ : List[str] = mask_feature_prob
a__ : Dict = mask_feature_length
a__ : Any = mask_feature_min_masks
a__ : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , suppress_tokens=__UpperCAmelCase , begin_suppress_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _A ( self ):
"""simple docstring"""
a__ : Any = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
a__ : Dict = {0: "batch"}
else:
a__ : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" )
return common_inputs
def _A ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 2_2050 , __UpperCAmelCase = 5.0 , __UpperCAmelCase = 220 , ):
"""simple docstring"""
a__ : Optional[Any] = OrderedDict()
a__ : str = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__UpperCAmelCase , framework=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , time_duration=__UpperCAmelCase , frequency=__UpperCAmelCase , )
a__ : Optional[Any] = encoder_inputs["input_features"].shape[2]
a__ : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : int = super().generate_dummy_inputs(
preprocessor.tokenizer , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a__ : List[Any] = encoder_inputs.pop("input_features" )
a__ : Dict = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
a__ : Tuple = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def _A ( self ):
"""simple docstring"""
return 1E-3
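# Minimal usage sketch (assumption: the standard PretrainedConfig API; the config
# class above is WhisperConfig under an obfuscated name). The attribute_map lets
# generic attribute names resolve to the Whisper-specific ones:
# config = WhisperConfig(encoder_attention_heads=8)
# config.num_attention_heads  # -> 8, resolved through attribute_map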
| 191 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :int = ["image_processor", "tokenizer"]
A :Any = "LayoutLMv3ImageProcessor"
A :str = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
a__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
a__ : List[str] = kwargs.pop("feature_extractor" )
a__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
a__ : List[str] = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a__ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
a__ : Optional[Any] = features["words"]
a__ : Union[str, Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
a__ : Dict = features.pop("pixel_values" )
if return_overflowing_tokens is True:
a__ : str = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
a__ : Tuple = images
return encoded_inputs
def _A ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}' )
return images_with_overflow
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _A ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
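# Minimal usage sketch (assumption: the image processor was created with
# apply_ocr=False, so words and boxes are passed in manually; the values are
# illustrative):
# encoding = processor(
#     image,
#     text=["hello", "world"],
#     boxes=[[1, 2, 3, 4], [5, 6, 7, 8]],
#     return_tensors="pt",
# )
# list(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values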
| 191 | 1 |
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError('''check_bouncy() accepts only integer arguments''')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError('''solution() only accepts values from 0 to 100''')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 711 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( DiffusionPipeline ):
def __init__( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCamelCase__, unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self, UpperCamelCase__ = 1, UpperCamelCase__ = None, UpperCamelCase__ = 0.0, UpperCamelCase__ = 50, UpperCamelCase__ = "pil", UpperCamelCase__ = True, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=UpperCamelCase__, )
lowerCAmelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCamelCase__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase_ = {}
if accepts_eta:
lowerCAmelCase_ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCAmelCase_ = self.scheduler.scale_model_input(UpperCamelCase__, UpperCamelCase__ )
# predict the noise residual
lowerCAmelCase_ = self.unet(UpperCamelCase__, UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ ).prev_sample
# decode the image latents with the VAE
lowerCAmelCase_ = self.vqvae.decode(UpperCamelCase__ ).sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0, 1 )
lowerCAmelCase_ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
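# Minimal usage sketch (assumption: a checkpoint laid out with vqvae/unet/scheduler
# subfolders, e.g. "CompVis/ldm-celebahq-256"; commented out since it downloads
# weights — note the pipeline class is named `A` in this dump):
# pipe = A.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]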
| 325 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 4 , __UpperCAmelCase=32 * 6 , __UpperCAmelCase=4 , __UpperCAmelCase=32 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = mask_feature_size
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a_ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a_ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a_ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=a_ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , config.decoder_config.decoder_layers )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskFormerModel(config=a_ )
model.to(a_ )
model.eval()
__lowerCamelCase = model(pixel_values=a_ , pixel_mask=a_ )
__lowerCamelCase = model(a_ , output_hidden_states=a_ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a_ , a_ )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MaskFormerForInstanceSegmentation(config=a_ )
model.to(a_ )
model.eval()
def comm_check_on_output(__UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=a_ , pixel_mask=a_ )
__lowerCamelCase = model(a_ )
comm_check_on_output(a_ )
__lowerCamelCase = model(
pixel_values=a_ , pixel_mask=a_ , mask_labels=a_ , class_labels=a_ )
comm_check_on_output(a_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase__ = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MaskFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a_ , **a_ , output_hidden_states=a_ )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCamelCase = MaskFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=a_ ),
"mask_labels": torch.randn((2, 10, *size) , device=a_ ),
"class_labels": torch.zeros(2 , 10 , device=a_ ).long(),
}
__lowerCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a_ )
__lowerCamelCase = model(**a_ )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a_ , **a_ , output_hidden_states=a_ )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a_ ).to(a_ )
__lowerCamelCase = model(**a_ , output_attentions=a_ )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(a_ )
model.to(a_ )
model.train()
__lowerCamelCase = model(a_ , mask_labels=a_ , class_labels=a_ ).loss
loss.backward()
def lowerCamelCase ( self ):
'''simple docstring'''
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(a_ )
model.to(a_ )
model.train()
__lowerCamelCase = model(a_ , mask_labels=a_ , class_labels=a_ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ = 1E-4
def a__ ( ):
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(a_ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(a_ , return_tensors='''pt''' ).to(a_ )
__lowerCamelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 800, 1088) )
with torch.no_grad():
__lowerCamelCase = model(**a_ )
__lowerCamelCase = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
__lowerCamelCase = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
__lowerCamelCase = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a_ , atol=a_ ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(a_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(a_ , return_tensors='''pt''' ).to(a_ )
__lowerCamelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 800, 1088) )
with torch.no_grad():
__lowerCamelCase = model(**a_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
__lowerCamelCase = torch.tensor(a_ ).to(a_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(a_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(a_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(a_ , return_tensors='''pt''' ).to(a_ )
__lowerCamelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 800, 1088) )
with torch.no_grad():
__lowerCamelCase = model(**a_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
__lowerCamelCase = torch.tensor(a_ ).to(a_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(a_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
__lowerCamelCase = inputs["pixel_values"].to(a_ )
__lowerCamelCase = [el.to(a_ ) for el in inputs["mask_labels"]]
__lowerCamelCase = [el.to(a_ ) for el in inputs["class_labels"]]
with torch.no_grad():
__lowerCamelCase = model(**a_ )
self.assertTrue(outputs.loss is not None )
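# Minimal post-processing sketch (assumption: the MaskFormerImageProcessor API
# used above; target_sizes entries are (height, width) of the original images):
# semantic_maps = image_processor.post_process_semantic_segmentation(
#     outputs, target_sizes=[(480, 640)]
# )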
| 175 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
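# Example output for n=4, k=2:
# generate_all_combinations(4, 2)
# -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]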
| 250 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = (KDPMaDiscreteScheduler,)
lowerCAmelCase__ : int = 10
def a__ ( self : Union[str, Any] , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
    def test_timesteps(self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3
    def test_full_loop_no_noise(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
    def test_full_loop_device(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if str(torch_device ).startswith("cpu" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
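
    # Illustrative usage note (added; not part of the test suite): the full-loop
    # tests above exercise the standard diffusers sampling pattern, roughly
    #
    #   scheduler.set_timesteps(num_inference_steps)
    #   sample = initial_noise * scheduler.init_noise_sigma
    #   for t in scheduler.timesteps:
    #       model_input = scheduler.scale_model_input(sample, t)
    #       model_output = model(model_input, t)
    #       sample = scheduler.step(model_output, t, sample).prev_sample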
| 688 |
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), used in Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")


if __name__ == "__main__":
    main()
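
# Worked example (illustrative; the script itself is interactive): with
# x = [0, 1, 2, 3], first-column y values [1, 2, 4, 8] (i.e. 2**x) and value = 2,
# u = (2 - 0) / (1 - 0) = 2 and the forward differences of the first row are
# 1, 1, 1, so summ = 1 + ucal(2, 1)*1/1! + ucal(2, 2)*1/2! + ucal(2, 3)*1/3!
# = 1 + 2 + 1 + 0 = 4, which matches 2**2 = 4.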
| 688 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )

        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs )

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5] ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )

        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # prevent fully masking the attention, so the cache test can use the mask
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5] ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.int8 ),
            ], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )

                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary(self ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        tokenizer = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text , return_tensors="""np""" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 215 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Dict = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus ):
    def setup(self ) -> None:
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check(self , args ):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(sys , """argv""" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self ):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args )

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args )

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args )
| 215 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : List[str]= {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any]= [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A__ : Union[str, Any]= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase ):
    def __init__(self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
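        # Worked example (illustrative): with the defaults above, image_size=30 and
        # patch_size=2 give num_patches = (30 // 2) ** 2 = 225, hence seq_length = 226.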
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )

        return config, pixel_values, labels
    def create_and_check_model(self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self ) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self ):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='np' ).pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196) , dtype=bool )

        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
@slow
    def test_inference_image_classification_head_imagenet_1k(self ):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )

        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = np.array([-1.2385, -1.0987, -1.0108] )

        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
    def test_inference_image_classification_head_imagenet_22k(self ):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )

        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = np.array([1.6881, -0.2787, 0.5901] )

        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
| 20 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.', '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1', 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2', 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm', 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn', 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha', 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q', 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k', 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v', 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1', 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2', 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff', 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class', 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm', 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key

    return orig_key
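
# Illustrative trace (assumed key name, not taken from a real checkpoint): rename_key
# turns "model.transformer_0.mha.W_q.weight" into
# "yoso.encoder.layer.0.attention.self.query.weight" -- strip "model.", map
# transformer_0 -> encoder.layer.0 and mha -> attention, rewrite W_q -> self.query,
# then prefix "yoso." because the key does not belong to a "cls" head.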
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu' )["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict )

    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )

    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_a = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 19 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin , unittest.TestCase):

    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **a__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
    def get_rust_tokenizer( self , **a__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
    def get_input_output_texts( self , a__ ):
'''simple docstring'''
__snake_case :List[Any] = """lower newer"""
__snake_case :Any = """lower newer"""
return input_text, output_text
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Optional[int] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case :List[Any] = """lower newer"""
__snake_case :int = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__snake_case :Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
__snake_case :List[Any] = tokens + [tokenizer.unk_token]
__snake_case :List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case :Union[str, Any] = self.get_tokenizer()
__snake_case :Optional[Any] = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case :Optional[int] = """lower newer"""
# Testing tokenization
__snake_case :List[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
__snake_case :int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
__snake_case :List[str] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case :int = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
__snake_case :Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case :Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
__snake_case :Optional[int] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
__snake_case :Dict = tokens + [rust_tokenizer.unk_token]
__snake_case :Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __lowercase ( self , *a__ , **a__ ) -> Any:
'''simple docstring'''
pass
def __lowercase ( self , a__=15 ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case :Optional[int] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
__snake_case :List[str] = """This is a simple input"""
__snake_case :List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case :Tuple = ("""This is a simple input""", """This is a pair""")
__snake_case :Union[str, Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__snake_case :Tuple = """This is a simple input"""
__snake_case :Optional[int] = ["""This is a simple input looooooooong""", """This is a simple input"""]
__snake_case :List[str] = ("""This is a simple input""", """This is a pair""")
__snake_case :List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__snake_case :Tuple = tokenizer.pad_token_id
__snake_case :int = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
__snake_case :Optional[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
__snake_case :List[Any] = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
__snake_case :Optional[int] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[int] = """$$$"""
__snake_case :Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
__snake_case :List[Any] = """This is a simple input"""
__snake_case :Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case :Tuple = tokenizer.bos_token_id
__snake_case :List[str] = tokenizer(a__ )
__snake_case :Any = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case :int = tokenizer.decode(out_s.input_ids )
__snake_case :Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowercase ( self ) -> str:
'''simple docstring'''
pass
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :List[str] = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__snake_case :Tuple = """Encode this."""
__snake_case :Tuple = """This one too please."""
__snake_case :Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case :List[str] = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
__snake_case :Union[str, Any] = encoded_sequence_dict["""input_ids"""]
__snake_case :Optional[Any] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(a__ ) , len(a__ ) )
__snake_case :Optional[int] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
__snake_case :str = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Union[str, Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
__snake_case :Union[str, Any] = """A photo of a cat"""
__snake_case :int = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""test_opt""" )
__snake_case :int = AutoTokenizer.from_pretrained("""./test_opt""" )
__snake_case :Tuple = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Tuple = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=a__ )
__snake_case :str = """A photo of a cat"""
__snake_case :List[str] = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
__snake_case :Optional[Any] = """bos"""
__snake_case :int = tokenizer.get_vocab()["""bos"""]
__snake_case :str = """A photo of a cat"""
__snake_case :int = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""./tok""" )
__snake_case :Dict = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
__snake_case :List[Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 455 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('lowercase' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('strip_accents' , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('type' ) )
            pre_tok_state['lowercase'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )

        self.do_lower_case = do_lower_case
    def __getstate__(self ):
        state = self.__dict__.copy()
        # PreTokenizer.custom is not picklable, so swap in a plain BertPreTokenizer
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained(self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        # serialize with a plain BertPreTokenizer; the Jieba pre-tokenizer is rebuilt on load
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 702 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
    parser.add_argument(
        '--validation_file' , type=str , default=None , help='A csv or a json file containing the validation data.' )
    parser.add_argument(
        '--max_length' , type=int , default=5 , help='The maximum total input sequence length after tokenization.' , )
    parser.add_argument(
        '--num_beams' , type=int , default=None , help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ) , )
    parser.add_argument(
        '--model_name_or_path' , type=str , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=True , )
    parser.add_argument(
        '--config_name' , type=str , default=None , help='Pretrained config name or path if not the same as model_name' , )
    parser.add_argument(
        '--device' , type=str , default='cpu' , help='Device where the model will be run' , )
    parser.add_argument('--output_file_path' , type=str , default=None , help='Where to store the final ONNX file.' )

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name , device="cpu" ):
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='pt' ).to(model.device )

        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )

        torch.onnx.export(
            bart_script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )

        logger.info('Model exported to {}'.format(onnx_file_path ) )

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path ) )

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams ),
                'max_length': np.array(max_length ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } , )

        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )

        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )

    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device )

    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )

    model.to(device )

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
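# A sketch of how the exporter above is typically invoked from a shell (the
# script file name and checkpoint are illustrative assumptions; the flags
# match the argparse options defined in this file):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --device cpu \
#       --num_beams 4 --max_length 50 \
#       --output_file_path BART.onnx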
| 677 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrap `tqdm.auto.tqdm` so that, by default, only the local main process renders a progress bar."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
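# A minimal usage sketch (illustrative, not part of this module): under a
# multi-process launch, only the local main process draws the bar, so the
# logs of the other ranks stay clean.
#
#   from accelerate.utils import tqdm
#   for _ in tqdm(True, range(1_000), desc="steps"):
#       pass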
| 268 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
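# What the lazy layout above buys the caller (a sketch; assumes `transformers`
# is installed with torch): the heavy modeling submodule is only imported the
# first time one of its attributes is actually accessed.
#
#   from transformers import ConvNextConfig, ConvNextModel
#   model = ConvNextModel(ConvNextConfig())  # triggers the lazy import of modeling_convnext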
| 268 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Unwrap any mixed-precision decorators until we reach the original forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to the file `f`, using `xm.save` on TPU and saving only from the main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys), removing them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a human-readable name for `obj`: its qualified name, its class name, or `str(obj)`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
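# Quick illustrative checks for the two helpers above (values are made up):
#
#   with patch_environment(master_port="29501"):
#       assert os.environ["MASTER_PORT"] == "29501"
#   assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"b": 1, "c": 2}}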
def is_port_in_use(port=None):
    """Check whether a port is already in use on `localhost` (defaults to the torch.distributed port, 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 702 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowerCamelCase__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
    )
| 549 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients should be given in order of degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: "Polynomial") -> "Polynomial":
        """Add two polynomials by summing coefficients termwise."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: "Polynomial") -> "Polynomial":
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> "Polynomial":
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: "Polynomial") -> "Polynomial":
        """Multiply two polynomials: each coefficient pair contributes to degree i + j."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at x = substitution."""
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> "Polynomial":
        """Return the derivative, term by term: d/dx(c*x^i) = c*i*x^(i-1)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> "Polynomial":
        """Return an antiderivative with the given integration constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
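if __name__ == "__main__":
    # Usage sketch added for illustration; the values below are arbitrary.
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1, coefficients from degree 0 upwards
    assert p.evaluate(2) == 17  # 3*4 + 2*2 + 1
    assert str(p.derivative()) == "6x + 2"
    assert p.integral().derivative() == p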
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
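# These are nightly GPU integration tests; a sketch of running just this file
# (the path is illustrative, and `RUN_NIGHTLY` is assumed to be the flag that
# diffusers' test utilities read for the `@nightly` marker):
#
#   RUN_NIGHTLY=1 python -m pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py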
| 49 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generator built from the required characters plus roughly equal
    # shares of random letters, digits and punctuation.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """
    A password is strong if it is at least `min_length` characters long and
    contains an uppercase letter, a lowercase letter, a digit and a special character.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
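# Illustrative checks for the helpers above (the interactive main() aside):
#
#   pw = password_generator(12)
#   assert len(pw) == 12
#   assert is_strong_password("Passw0rd!")   # has upper, lower, digit and symbol
#   assert not is_strong_password("weak")    # too short, no digit or symbol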
| 204 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
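# Illustrative use of the two configuration classes above:
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   assert config.model_type == "yolos"
#   onnx_config = YolosOnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs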
| 204 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Load the feature extractor configuration from a local folder or a model repo on the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
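# A minimal usage sketch (the checkpoint name is illustrative):
#
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = extractor(raw_speech, sampling_rate=16_000, return_tensors="pt")
#
# where `raw_speech` stands in for a 1-D float array of audio samples.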
| 424 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three headline counters (cases, deaths, recovered) from worldometers.info."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 424 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
class TFMTaModel(TFTaModel):
    """mT5 reuses the TF T5 model implementation; only the configuration differs."""

    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """mT5 conditional-generation head on top of the TF T5 stack."""

    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    """Encoder-only mT5 variant, mirroring the TF T5 encoder model."""

    model_type = "mt5"
    config_class = MTaConfig
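# A short usage sketch (checkpoint name illustrative; requires TensorFlow):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")
#   inputs = tokenizer("Hello world", return_tensors="tf")
#   summary_ids = model.generate(**inputs)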
| 211 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))

    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn each (story, cont1, cont2, label) tuple into padded Transformer inputs of shape (n_batch, 2, input_len)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
_a = argparse.ArgumentParser()
parser.add_argument("--model_name", type=lowerCamelCase__, default="openai-gpt", help="pretrained model name" )
parser.add_argument("--do_train", action="store_true", help="Whether to run training." )
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir", default=lowerCamelCase__, type=lowerCamelCase__, required=lowerCamelCase__, help="The output directory where the model predictions and checkpoints will be written.", )
parser.add_argument("--train_dataset", type=lowerCamelCase__, default="" )
parser.add_argument("--eval_dataset", type=lowerCamelCase__, default="" )
parser.add_argument("--seed", type=lowerCamelCase__, default=42 )
parser.add_argument("--num_train_epochs", type=lowerCamelCase__, default=3 )
parser.add_argument("--train_batch_size", type=lowerCamelCase__, default=8 )
parser.add_argument("--eval_batch_size", type=lowerCamelCase__, default=16 )
parser.add_argument("--adam_epsilon", default=1e-8, type=lowerCamelCase__, help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm", type=lowerCamelCase__, default=1 )
parser.add_argument(
"--max_steps", default=-1, type=lowerCamelCase__, help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
), )
parser.add_argument(
"--gradient_accumulation_steps", type=lowerCamelCase__, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=6.25e-5 )
parser.add_argument("--warmup_steps", default=0, type=lowerCamelCase__, help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule", type=lowerCamelCase__, default="warmup_linear" )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--lm_coef", type=lowerCamelCase__, default=0.9 )
parser.add_argument("--n_valid", type=lowerCamelCase__, default=374 )
parser.add_argument("--server_ip", type=lowerCamelCase__, default="", help="Can be used for distant debugging." )
parser.add_argument("--server_port", type=lowerCamelCase__, default="", help="Can be used for distant debugging." )
_a = parser.parse_args()
print(lowerCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=lowerCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_a = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_a = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(lowerCamelCase__, lowerCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_a = ["_start_", "_delimiter_", "_classify_"]
_a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCamelCase__ )
_a = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
_a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCamelCase__ ) )
model.to(lowerCamelCase__ )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("Encoding dataset..." )
_a = load_rocstories_dataset(args.train_dataset )
_a = load_rocstories_dataset(args.eval_dataset )
_a = (train_dataset, eval_dataset)
_a = tokenize_and_encode(lowerCamelCase__ )
# Compute the max input length for the Transformer
_a = model.config.n_positions // 2 - 2
_a = max(
len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_a = min(lowerCamelCase__, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_a = pre_process_datasets(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ )
_a , _a = tensor_datasets[0], tensor_datasets[1]
_a = TensorDataset(*lowerCamelCase__ )
_a = RandomSampler(lowerCamelCase__ )
_a = DataLoader(lowerCamelCase__, sampler=lowerCamelCase__, batch_size=args.train_batch_size )
_a = TensorDataset(*lowerCamelCase__ )
_a = SequentialSampler(lowerCamelCase__ )
_a = DataLoader(lowerCamelCase__, sampler=lowerCamelCase__, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_a = args.max_steps
_a = args.max_steps // (len(lowerCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
_a = len(lowerCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
_a = list(model.named_parameters() )
_a = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
_a = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
_a = AdamW(lowerCamelCase__, lr=args.learning_rate, eps=args.adam_epsilon )
_a = get_linear_schedule_with_warmup(
lowerCamelCase__, num_warmup_steps=args.warmup_steps, num_training_steps=lowerCamelCase__ )
if args.do_train:
_a , _a , _a = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc="Epoch" ):
_a = 0
_a = 0
_a = tqdm(lowerCamelCase__, desc="Training" )
for step, batch in enumerate(lowerCamelCase__ ):
_a = tuple(t.to(lowerCamelCase__ ) for t in batch )
_a , _a , _a , _a = batch
_a = model(lowerCamelCase__, mc_token_ids=lowerCamelCase__, lm_labels=lowerCamelCase__, mc_labels=lowerCamelCase__ )
_a = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_a = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_a = "Training loss: {:.2e} lr: {:.2e}".format(lowerCamelCase__, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_a = model.module if hasattr(lowerCamelCase__, "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_a = os.path.join(args.output_dir, lowerCamelCase__ )
_a = os.path.join(args.output_dir, lowerCamelCase__ )
torch.save(model_to_save.state_dict(), lowerCamelCase__ )
model_to_save.config.to_json_file(lowerCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCamelCase__ )
if args.do_eval:
model.eval()
_a , _a = 0, 0
_a , _a = 0, 0
for batch in tqdm(lowerCamelCase__, desc="Evaluating" ):
_a = tuple(t.to(lowerCamelCase__ ) for t in batch )
_a , _a , _a , _a = batch
with torch.no_grad():
_a , _a , _a , _a = model(
lowerCamelCase__, mc_token_ids=lowerCamelCase__, lm_labels=lowerCamelCase__, mc_labels=lowerCamelCase__ )
_a = mc_logits.detach().cpu().numpy()
_a = mc_labels.to("cpu" ).numpy()
_a = accuracy(lowerCamelCase__, lowerCamelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_a = eval_loss / nb_eval_steps
_a = eval_accuracy / nb_eval_examples
_a = tr_loss / nb_tr_steps if args.do_train else None
_a = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
_a = os.path.join(args.output_dir, "eval_results.txt" )
with open(lowerCamelCase__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s", lowerCamelCase__, str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
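# A sketch of launching this fine-tuning script (the script file name and the
# dataset paths are illustrative; the flags match the parser defined above):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset "$ROCSTORIES/train.csv" \
#       --eval_dataset "$ROCSTORIES/val.csv" \
#       --output_dir ./gpt_finetuned \
#       --train_batch_size 16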
| 131 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
__UpperCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , )
_a = floats_tensor(control_image.shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> str:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> str:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = MultiControlNetModel([controlneta, controlneta] )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
]
_a = floats_tensor(control_image[0].shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
_a = 10.0
_a = 4
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(snake_case_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
_a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=snake_case_ , controlnet=snake_case_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = "evil space-punk bird"
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_1_2, 5_1_2) )
_a = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_1_2, 5_1_2) )
_a = pipe(
snake_case_ , snake_case_ , control_image=snake_case_ , generator=snake_case_ , output_type="np" , num_inference_steps=5_0 , strength=0.6 , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 131 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256,
        diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        num_clusters=504, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
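
# A minimal usage sketch (not part of the original file): build the config with its
# defaults and read the derived feature-extractor downsampling factor.
if __name__ == "__main__":
    config = _SCREAMING_SNAKE_CASE()  # class name kept as it appears above
    print(config.num_feat_extract_layers)  # 7 convolutional layers
    print(config.inputs_to_logits_ratio)   # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320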
| 714 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
# Function and parameter names below are reconstructed to match their call sites
# (e.g. `is_remote_filesystem` is referenced inside `rename`).
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # reconstructed targets (assumption): clear fsspec's cached event-loop state
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
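
# A minimal usage sketch (not part of the original module); it only uses fsspec's
# built-in local filesystem, everything else is defined above.
if __name__ == "__main__":
    print(extract_path_from_uri("s3://bucket/data/train"))  # -> "bucket/data/train"
    local_fs = fsspec.filesystem("file")
    print(is_remote_filesystem(local_fs))  # -> False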
| 447 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class A_ (DiffusionPipeline):
"""simple docstring"""
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ) -> None:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
snake_case_ : List[Any] = dict(scheduler.config )
snake_case_ : Tuple = 1
snake_case_ : int = FrozenDict(lowerCAmelCase__ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
snake_case_ : Any = dict(scheduler.config )
snake_case_ : Optional[Any] = True
snake_case_ : Dict = FrozenDict(lowerCAmelCase__ )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto") -> None:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ : Optional[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self) -> None:
        '''simple docstring'''
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self) -> None:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module, "_hf_hook")
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
'''simple docstring'''
        # Resolve the region to inpaint from the `text` query with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
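
# A minimal usage sketch (not part of the original file). The CLIPSeg checkpoint is a
# real public one; the remaining Stable Diffusion components must come from an
# inpainting-compatible checkpoint of your choice:
#
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = A_(seg_model, seg_processor, vae, text_encoder, tokenizer, unet, scheduler,
#             safety_checker, feature_extractor)
#   result = pipe(prompt="a red couch", image=init_image, text="the sofa")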
| 653 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
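    # Quick demonstration (added example): the first 15 rounds of the game.
    print(fizz_buzz(1, 15))  # -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "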
| 653 | 1 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    # Parameter and local names reconstructed from the usage in the `__main__` block below.
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
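
# Added examples: decimal_to_any(255, 16) -> "FF"; decimal_to_any(9, 2) -> "1001"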
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 703 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str ={
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A_ (PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        # Parameter and attribute names reconstructed from the assignment order below.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
return math.prod(self.conv_stride )
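
# A minimal usage sketch (not part of the original file): the conv strides multiply
# to the waveform-to-frame downsampling factor exposed by the property above.
if __name__ == "__main__":
    config = A_()  # class name kept as it appears above
    print(config.inputs_to_logits_ratio)  # math.prod((5, 2, 2, 2, 2, 2, 2)) == 320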
| 72 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Union[str, Any] ):
lowercase__ : Optional[int] = {}
def snake_case__( self: Any, lowerCamelCase_: int, lowerCamelCase_: List[Any], lowerCamelCase_: Optional[int]=1 ):
if self.graph.get(__a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase__ : Tuple = [[w, v]]
if not self.graph.get(__a ):
lowercase__ : Optional[Any] = []
def snake_case__( self: str ):
return list(self.graph )
def snake_case__( self: Any, lowerCamelCase_: int, lowerCamelCase_: Any ):
if self.graph.get(__a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a )
def snake_case__( self: Optional[int], lowerCamelCase_: str=-2, lowerCamelCase_: Optional[Any]=-1 ):
if s == d:
return []
lowercase__ : Union[str, Any] = []
lowercase__ : str = []
if s == -2:
lowercase__ : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a ) != 0:
lowercase__ : int = stack[len(__a ) - 1]
else:
lowercase__ : Dict = ss
# check if se have reached the starting point
if len(__a ) == 0:
return visited
def snake_case__( self: str, lowerCamelCase_: str=-1 ):
if c == -1:
lowercase__ : List[str] = floor(random() * 10000 ) + 10
for i in range(__a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__ : Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(__a, __a, 1 )
def snake_case__( self: Any, lowerCamelCase_: int=-2 ):
lowercase__ : List[Any] = deque()
lowercase__ : List[str] = []
if s == -2:
lowercase__ : str = list(self.graph )[0]
d.append(__a )
visited.append(__a )
while d:
lowercase__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case__( self: Tuple, lowerCamelCase_: Union[str, Any] ):
lowercase__ : Optional[int] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case__( self: Optional[Any], lowerCamelCase_: Any ):
return len(self.graph[u] )
def snake_case__( self: Optional[int], lowerCamelCase_: Tuple=-2 ):
lowercase__ : Any = []
lowercase__ : Optional[Any] = []
if s == -2:
lowercase__ : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : Tuple = s
lowercase__ : List[Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__a ) != 0:
lowercase__ : Dict = stack[len(__a ) - 1]
else:
lowercase__ : Tuple = ss
# check if se have reached the starting point
if len(__a ) == 0:
return sorted_nodes
def snake_case__( self: Dict ):
lowercase__ : Any = []
lowercase__ : Union[str, Any] = []
lowercase__ : List[str] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : Union[str, Any] = -2
lowercase__ : Optional[int] = []
lowercase__ : Optional[int] = s
lowercase__ : str = False
lowercase__ : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__ : List[Any] = len(__a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__ : Optional[int] = True
if len(__a ) != 0:
lowercase__ : Tuple = stack[len(__a ) - 1]
else:
lowercase__ : List[str] = False
indirect_parents.append(__a )
lowercase__ : Any = s
lowercase__ : int = ss
# check if se have reached the starting point
if len(__a ) == 0:
return list(__a )
def snake_case__( self: Any ):
lowercase__ : str = []
lowercase__ : Any = []
lowercase__ : List[Any] = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : Any = -2
lowercase__ : Optional[int] = []
lowercase__ : Tuple = s
lowercase__ : Tuple = False
lowercase__ : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__ : str = len(__a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__ : List[Any] = True
if len(__a ) != 0:
lowercase__ : List[str] = stack[len(__a ) - 1]
else:
lowercase__ : str = False
indirect_parents.append(__a )
lowercase__ : Any = s
lowercase__ : List[str] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return False
def snake_case__( self: Optional[int], lowerCamelCase_: Tuple=-2, lowerCamelCase_: List[Any]=-1 ):
lowercase__ : Union[str, Any] = time()
self.dfs(__a, __a )
lowercase__ : Tuple = time()
return end - begin
def snake_case__( self: List[str], lowerCamelCase_: Optional[Any]=-2 ):
lowercase__ : str = time()
self.bfs(__a )
lowercase__ : Tuple = time()
return end - begin
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Any ):
lowercase__ : List[Any] = {}
def snake_case__( self: Tuple, lowerCamelCase_: Any, lowerCamelCase_: int, lowerCamelCase_: List[Any]=1 ):
# check if the u exists
if self.graph.get(__a ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__ : Any = [[w, v]]
# add the other way
if self.graph.get(__a ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowercase__ : Dict = [[w, u]]
def snake_case__( self: Tuple, lowerCamelCase_: List[str], lowerCamelCase_: List[str] ):
if self.graph.get(__a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a )
# the other way round
if self.graph.get(__a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__a )
def snake_case__( self: Any, lowerCamelCase_: str=-2, lowerCamelCase_: str=-1 ):
if s == d:
return []
lowercase__ : Dict = []
lowercase__ : List[Any] = []
if s == -2:
lowercase__ : Tuple = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : Any = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a ) != 0:
lowercase__ : List[str] = stack[len(__a ) - 1]
else:
lowercase__ : Union[str, Any] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return visited
def snake_case__( self: Any, lowerCamelCase_: Tuple=-1 ):
if c == -1:
lowercase__ : Any = floor(random() * 10000 ) + 10
for i in range(__a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(__a, __a, 1 )
def snake_case__( self: Tuple, lowerCamelCase_: int=-2 ):
lowercase__ : Dict = deque()
lowercase__ : int = []
if s == -2:
lowercase__ : str = list(self.graph )[0]
d.append(__a )
visited.append(__a )
while d:
lowercase__ : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case__( self: str, lowerCamelCase_: str ):
return len(self.graph[u] )
def snake_case__( self: Any ):
lowercase__ : int = []
lowercase__ : Tuple = []
lowercase__ : str = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : List[str] = -2
lowercase__ : Optional[Any] = []
lowercase__ : Optional[Any] = s
lowercase__ : List[Any] = False
lowercase__ : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__ : Any = len(__a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__ : Any = True
if len(__a ) != 0:
lowercase__ : Tuple = stack[len(__a ) - 1]
else:
lowercase__ : Dict = False
indirect_parents.append(__a )
lowercase__ : str = s
lowercase__ : Any = ss
# check if se have reached the starting point
if len(__a ) == 0:
return list(__a )
def snake_case__( self: Any ):
lowercase__ : str = []
lowercase__ : str = []
lowercase__ : str = list(self.graph )[0]
stack.append(__a )
visited.append(__a )
lowercase__ : List[str] = -2
lowercase__ : List[str] = []
lowercase__ : Optional[int] = s
lowercase__ : Tuple = False
lowercase__ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__ : Tuple = len(__a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__ : str = True
if len(__a ) != 0:
lowercase__ : Optional[int] = stack[len(__a ) - 1]
else:
lowercase__ : Tuple = False
indirect_parents.append(__a )
lowercase__ : List[str] = s
lowercase__ : List[Any] = ss
# check if se have reached the starting point
if len(__a ) == 0:
return False
def snake_case__( self: Tuple ):
return list(self.graph )
def snake_case__( self: Optional[Any], lowerCamelCase_: Dict=-2, lowerCamelCase_: Optional[Any]=-1 ):
lowercase__ : List[str] = time()
self.dfs(__a, __a )
lowercase__ : Optional[int] = time()
return end - begin
def snake_case__( self: Union[str, Any], lowerCamelCase_: Optional[Any]=-2 ):
lowercase__ : Tuple = time()
self.bfs(__a )
lowercase__ : Optional[int] = time()
return end - begin
| 266 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url: str) -> SwinaSRConfig:
    # NOTE: config attribute names below are a reconstruction (assumption) based on
    # the Swin2SR configuration; the branch values come from the original lines.
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name: str, config: SwinaSRConfig) -> str:
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict: dict, config: SwinaSRConfig) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            # NOTE: the target key names below are a reconstruction (assumption);
            # the fused qkv tensor is split into separate query/key/value entries.
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str, push_to_hub: bool) -> None:
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowerCamelCase : List[str] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
lowerCamelCase : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
lowerCamelCase : Optional[Any] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCamelCase : str = 126 if """Jpeg""" in checkpoint_url else 256
lowerCamelCase : Any = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowerCamelCase : Optional[Any] = transforms(UpperCamelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
lowerCamelCase : Union[str, Any] = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCamelCase : List[Any] = model(UpperCamelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCamelCase : str = torch.Size([1, 3, 512, 512] )
lowerCamelCase : Optional[Any] = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCamelCase : Any = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase : str = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCamelCase : Dict = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase : str = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCamelCase : Any = torch.Size([1, 3, 512, 512] )
lowerCamelCase : Dict = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCamelCase : str = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase : Optional[int] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
print("""Looks ok!""" )
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path)
print(F'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
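
# Example invocation (added; the script filename and output path are illustrative):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64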
| 222 | 0 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
"""simple docstring"""
    def _get_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        # `thresholding=True` is an assumption for the obfuscated boolean placeholder.
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        # `thresholding=True` is an assumption for the obfuscated boolean placeholder.
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
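
# A minimal usage sketch (added; the concrete pipeline/test names are assumptions):
# pipeline test classes are expected to mix this class in and delegate to its helpers:
#
#   class IFPipelineFastTests(__lowercase, unittest.TestCase):
#       pipeline_class = IFPipeline
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#       def test_save_load_local(self):
#           self._test_save_load_local()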
| 714 |
'''simple docstring'''
class Graph:
"""simple docstring"""
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
def __str__( self ):
        string = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
    def get_edges(self):
        output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
    def get_vertices(self):
return self.adjacency.keys()
@staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
for vertex in vertices:
g.add_vertex(_lowerCamelCase )
for edge in edges:
g.add_edge(*_lowerCamelCase )
return g
class UnionFind:
"""simple docstring"""
    def __init__(self):
        self.parent = {}
        self.rank = {}
def __len__( self ):
return len(self.parent )
    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, item1, item2):
        # The two distinct roots were collapsed to a single name by the obfuscation;
        # reconstructed here as standard union by rank.
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1

        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1

        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2

        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
@staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices

        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge

                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
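
# A minimal usage sketch (added): build a small weighted graph and compute its MST.
if __name__ == "__main__":
    g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)])
    g.distinct_weight()  # Borůvka's algorithm requires distinct edge weights
    print(UnionFind.boruvka_mst(g))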
| 287 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    # Field names reconstructed; they follow the standard run_language_modeling
    # argument layout implied by the metadata strings.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    # Field names reconstructed from the `args.*` references in `get_dataset` below.
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
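# The evaluation block above converts the mean eval loss into perplexity with a
# single exponential. A standalone illustration (the loss value below is made up):
import math

eval_loss = 3.2  # hypothetical mean cross-entropy in nats
perplexity = math.exp(eval_loss)
print(f"perplexity = {perplexity:.2f}")  # about 24.5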
| 655 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
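# A tiny sketch of the name normalization the test above performs: pip
# distribution names use hyphens, while backend identifiers use underscores.
# The mapping mirrors the two cases handled in the test; it is illustrative only.
PIP_NAME_OVERRIDES = {"k_diffusion": "k-diffusion", "invisible_watermark": "invisible-watermark"}


def pip_name(backend: str) -> str:
    return PIP_NAME_OVERRIDES.get(backend, backend)


assert pip_name("k_diffusion") == "k-diffusion"
assert pip_name("torch") == "torch"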
| 655 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
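# Quick self-check of exchange_sort on fixed inputs (values chosen arbitrarily).
# The algorithm does O(n^2) comparisons, like selection sort.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 0, -2]) == [-2, -1, 0]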
| 707 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
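# The max_id loop above is a generic "cursor" pagination pattern: fetch a page,
# remember the smallest id seen, then request items strictly older than it.
# A library-free sketch — fetch_page is a hypothetical stand-in for the API call:
def fetch_all(fetch_page, page_size=200):
    items = []
    page = fetch_page(max_id=None, count=page_size)
    while page:
        items.extend(page)
        oldest = page[-1] - 1  # ids are assumed to be descending integers
        page = fetch_page(max_id=oldest, count=page_size)
    return items


# Demo with a fake, id-sorted data source:
data = list(range(1000, 0, -1))


def fake_fetch(max_id, count):
    pool = [i for i in data if max_id is None or i <= max_id]
    return pool[:count]


assert fetch_all(fake_fetch) == data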
| 669 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
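# inputs_to_logits_ratio above is just the product of the conv strides: the total
# temporal downsampling of the feature extractor. A standalone check with the
# default strides from the config:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 5 * 2**6 — one logit frame per 320 input samples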
| 410 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
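# Sanity check of the Newton iteration above against math.sqrt. Convergence is
# quadratic, so a handful of iterations suffices for these inputs.
if __name__ == "__main__":
    assert abs(square_root_iterative(2) - math.sqrt(2)) < 1e-9
    assert abs(square_root_iterative(25) - 5.0) < 1e-9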
| 381 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
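# A minimal sketch of the `_convert_yes_no_to_bool`-style converter the prompts
# above rely on. The real helper lives in accelerate's config_utils; this version
# is illustrative only.
def convert_yes_no_to_bool(value: str) -> bool:
    value = value.strip().lower()
    if value in ("yes", "y", "true", "1"):
        return True
    if value in ("no", "n", "false", "0"):
        return False
    raise ValueError(f"Please enter yes or no, got {value!r}")


assert convert_yes_no_to_bool("YES") is True
assert convert_yes_no_to_bool("no") is False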
| 155 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
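# The residual-merging rule in forward() above is elementwise summation across
# controlnets. A tensor-free illustration with two "controlnets" whose residuals
# are plain floats (values arbitrary):
res_a = ([1.0, 2.0], 10.0)  # (down residuals, mid residual) from controlnet 1
res_b = ([0.5, 0.5], 1.0)   # same, from controlnet 2
down, mid = res_a
down = [p + c for p, c in zip(down, res_b[0])]
mid += res_b[1]
assert down == [1.5, 2.5] and mid == 11.0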
| 155 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
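# The forward-signature test above relies on inspect.signature preserving
# declaration order. A standalone illustration (the `forward` stub is a
# hypothetical stand-in for a model's forward method):
import inspect


def forward(pixel_values, labels=None):
    ...


assert [*inspect.signature(forward).parameters][:1] == ["pixel_values"]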
| 648 |
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
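# Hypothetical usage sketch — `unet` and `scheduler` would come from a real
# diffusers checkpoint; the names below are placeholders, not a tested API call:
#
#   pipeline = CustomPipeline(unet=unet, scheduler=scheduler)
#   out = pipeline()  # a tensor of ones with the unet's sample shape,
#                     # since the scheduler output cancels itself out above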
| 648 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def lowercase (snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = point_y / 4 / point_x
lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase = outgoing_gradient**2 + 4
lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCAmelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase = x_minus if isclose(A__ , A__ ) else x_plus
lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowercase (snake_case__ : float = 1.4 , snake_case__ : float = -9.6 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = first_x_coord
lowerCAmelCase = first_y_coord
lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = next_point(A__ , A__ , A__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
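# The up-blocks above pop skip activations off res_hidden_states_tuple and
# concatenate them on the channel axis before each resnet. Standalone shape
# illustration (channels-last, as in these Flax blocks; sizes are arbitrary):
import jax.numpy as jnp

hidden = jnp.zeros((1, 8, 8, 320))
skip = jnp.zeros((1, 8, 8, 640))
merged = jnp.concatenate((hidden, skip), axis=-1)
assert merged.shape == (1, 8, 8, 960)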
| 529 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
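# collate_fn above implements the classic pad-and-mask pattern: copy each
# variable-length sequence into a zero tensor and set the mask to 1 over the
# real tokens. Minimal standalone version with made-up sequences:
import torch

seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
bsz, max_len = len(seqs), max(len(s) for s in seqs)
text = torch.zeros(bsz, max_len, dtype=torch.long)
mask = torch.zeros(bsz, max_len, dtype=torch.long)
for i, s in enumerate(seqs):
    text[i, : len(s)] = s
    mask[i, : len(s)] = 1
assert mask.tolist() == [[1, 1, 1], [1, 1, 0]]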
| 333 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
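    # A minimal usage sketch (editorial addition, not part of the original file):
    # MobileNetV1Config works like any other transformers config class, so a
    # model can be built from scratch with custom hyperparameters, e.g. the
    # 0.75-depth / 192px variant mirrored by the checkpoint map above.
    #
    #   from transformers import MobileNetV1Config, MobileNetV1Model
    #   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    #   model = MobileNetV1Model(config)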
| 708 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
print(f"{solution() = }")
| 157 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
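    # Note (editorial): a minimal sketch of the KV-cache property this check
    # relies on (hypothetical standalone code, not part of the test above):
    # decoding one step at a time with `past_key_values` must reproduce the
    # full-sequence forward pass.
    #
    #   out = decoder(ids, use_cache=True)
    #   step = decoder(next_ids, past_key_values=out.past_key_values).last_hidden_state
    #   full = decoder(torch.cat([ids, next_ids], dim=-1)).last_hidden_state
    #   assert torch.allclose(full[:, -next_ids.shape[1]:], step, atol=1e-2)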
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 27 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
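    # Note (editorial): `Dataset.from_list` infers the Arrow schema from the
    # records, which is what the tests above pin down: columns come from the
    # first record, missing values are filled with None, and an empty list in
    # the first record can still be typed from a later one. A minimal sketch:
    #
    #   dset = Dataset.from_list([{"a": 1}, {"b": 2}])
    #   dset[1]  # -> {"a": None}; "b" is not part of the inferred schema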
| 35 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 455 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 455 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 77 |
"""simple docstring"""
from collections import namedtuple
A = namedtuple("""from_to""", """from_ to""")
A = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
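    # A worked example (editorial addition): the conversion goes through cubic
    # metres, i.e. value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to.
    # For one US cup to litres: 1 * 0.000236588 * 1000 = 0.236588.
    #
    #   volume_conversion(1, "cup", "litre")  # -> 0.236588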
| 77 | 1 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 714 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56 | 0 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    ) | 322 |
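# A worked example (editorial addition): 145 is a Krishnamurthy (strong)
# number because the factorials of its digits sum back to the number itself:
#   1! + 4! + 5! = 1 + 24 + 120 = 145
#
#   krishnamurthy(145)  # -> True
#   krishnamurthy(144)  # -> False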
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 322 | 1 |
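# Note (editorial): the pad() method in the image processor above always
# rounds *up* to the next multiple of `size`, padding a full extra block when
# the side is already a multiple. With size = 8:
#   old_height = 37 -> (37 // 8 + 1) * 8 - 37 = 3 rows of symmetric padding
#   old_height = 40 -> (40 // 8 + 1) * 8 - 40 = 8 rows, not 0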
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given its coefficients in increasing order of degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
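# Note (editorial): Horner's rule rewrites
#   p(x) = c_0 + c_1 x + ... + c_n x^n
# as the nested form
#   p(x) = c_0 + x * (c_1 + x * (... + x * c_n)),
# which is exactly the `result = result * x + coeff` loop above: one multiply
# and one add per coefficient, with no repeated evaluation of x**i.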
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x)) | 512 |
def jaro_winkler(str_1: str, str_2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, char in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if char in _str_2[left:right]:
                matched.append(char)
                _str_2 = f"{_str_2[0:_str_2.index(char)]} {_str_2[_str_2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str_1[:4], str_2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world")) | 512 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)
if __name__ == "__main__":
print(F'{solution() = }')
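    # Note (editorial): this appears to be Project Euler 174. A square lamina
    # with outer side `outer_width` and a centred square hole of side
    # `hole_width` (same parity) uses
    #   t = outer_width**2 - hole_width**2
    # tiles, so the loop tallies, for every t <= 1_000_000, how many distinct
    # laminae produce it; the answer counts the t with between 1 and 10 types.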
| 159 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 159 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(depths=[1, 1, 1, 1]),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : List[Any] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__ : str = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ : Dict = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ : int = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowercase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs['''pixel_values'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : str = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : int = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
| 250 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
a_ :Tuple = logging.get_logger(__name__)
a_ :List[str] = 'Hello world! cécé herlolip'
def a ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(A__ )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , A__ )
SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : List[str] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.bias
# this one is the final layer norm
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fc1.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fc1.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fc2.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads['''mnli'''].dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads['''mnli'''].dense.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : List[str] = model(A__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A__ ) )
else:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.allclose(our_output , their_output , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
a_ :str = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
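# A standalone sketch of the parity check performed above: copy weights from a
# source module into a target module, then assert the two forward passes agree
# within a tolerance. The Linear layers stand in for the fairseq and HF models.
import torch
from torch import nn

_src, _dst = nn.Linear(8, 8), nn.Linear(8, 8)
_dst.weight.data.copy_(_src.weight.data)
_dst.bias.data.copy_(_src.bias.data)
_x = torch.randn(1, 8)
_diff = torch.max(torch.abs(_src(_x) - _dst(_x))).item()
assert torch.allclose(_src(_x), _dst(_x), atol=1e-3), f"max_absolute_diff = {_diff}"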
| 250 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : List[Any] = logging.get_logger(__name__)
def lowerCAmelCase__ ( _a : List[Any] , _a : Dict=False ):
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
snake_case_ : List[str] = "segformer.encoder." + key
if key.startswith("backbone" ):
snake_case_ : Any = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case_ : Any = key[key.find("patch_embed" ) + len("patch_embed" )]
snake_case_ : Union[str, Any] = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_a )-1}''' )
if "norm" in key:
snake_case_ : Optional[int] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case_ : int = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
snake_case_ : List[str] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_a )-1}''' )
if "layer_norm1" in key:
snake_case_ : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
snake_case_ : Any = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
snake_case_ : int = key[key.find("block" ) + len("block" )]
snake_case_ : Tuple = key.replace(F'''block{idx}''' , F'''block.{int(_a )-1}''' )
if "attn.q" in key:
snake_case_ : Any = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
snake_case_ : int = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
snake_case_ : Optional[Any] = key.replace("attn" , "attention.self" )
if "fc1" in key:
snake_case_ : Dict = key.replace("fc1" , "dense1" )
if "fc2" in key:
snake_case_ : List[str] = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
snake_case_ : Union[str, Any] = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
snake_case_ : Tuple = key.replace("linear_fuse.conv" , "linear_fuse" )
snake_case_ : int = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case_ : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
snake_case_ : List[Any] = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_a )-1}''' )
if key.startswith("head" ):
snake_case_ : Optional[Any] = key.replace("head" , "classifier" )
snake_case_ : int = value
return new_state_dict
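# A standalone sketch of the renaming pattern used above: walk the old state
# dict, rewrite each key with str.replace, and collect the results in order.
from collections import OrderedDict

_old = OrderedDict([("backbone.block1.norm.weight", 0)])
_new = OrderedDict()
for _key, _value in _old.items():
    _key = _key.replace("backbone", "segformer.encoder")
    _key = _key.replace("block1", "block.0")
    _key = _key.replace("norm", "layer_norm")
    _new[_key] = _value
assert "segformer.encoder.block.0.layer_norm.weight" in _new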
def lowerCAmelCase__ ( _a : Tuple , _a : Dict ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
snake_case_ : int = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
snake_case_ : List[str] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
snake_case_ : int = kv_weight[
: config.hidden_sizes[i], :
]
snake_case_ : str = kv_bias[: config.hidden_sizes[i]]
snake_case_ : Tuple = kv_weight[
config.hidden_sizes[i] :, :
]
snake_case_ : str = kv_bias[
config.hidden_sizes[i] :
]
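# A standalone sketch of the key/value split above: the original checkpoint
# stores K and V as one fused (2 * hidden, hidden) matrix, while the converted
# model expects two separate (hidden, hidden) matrices. Sizes are illustrative.
import torch

_hidden = 4
_kv_weight = torch.randn(2 * _hidden, _hidden)  # K stacked on top of V
_k_weight = _kv_weight[:_hidden, :]             # first half -> key projection
_v_weight = _kv_weight[_hidden:, :]             # second half -> value projection
assert _k_weight.shape == _v_weight.shape == (_hidden, _hidden)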
def lowerCAmelCase__ ( ):
snake_case_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[int] = Image.open(requests.get(_a , stream=_a ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( _a : Optional[int] , _a : Any , _a : Any ):
snake_case_ : List[str] = SegformerConfig()
snake_case_ : Optional[Any] = False
# set attributes based on model_name
snake_case_ : Any = "huggingface/label-files"
if "segformer" in model_name:
snake_case_ : str = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
snake_case_ : Tuple = 1_50
snake_case_ : List[str] = "ade20k-id2label.json"
snake_case_ : Dict = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
snake_case_ : Optional[Any] = 19
snake_case_ : Dict = "cityscapes-id2label.json"
snake_case_ : Union[str, Any] = (1, 19, 1_28, 1_28)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
snake_case_ : Optional[int] = True
snake_case_ : Dict = model_name[4:6]
snake_case_ : Dict = 10_00
snake_case_ : Any = "imagenet-1k-id2label.json"
snake_case_ : Optional[int] = (1, 10_00)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
snake_case_ : Tuple = json.load(open(hf_hub_download(_a , _a , repo_type="dataset" ) , "r" ) )
snake_case_ : Any = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
snake_case_ : Optional[Any] = [64, 1_28, 3_20, 5_12]
snake_case_ : List[Any] = 2_56
elif size == "b2":
snake_case_ : List[str] = [64, 1_28, 3_20, 5_12]
snake_case_ : int = 7_68
snake_case_ : int = [3, 4, 6, 3]
elif size == "b3":
snake_case_ : Any = [64, 1_28, 3_20, 5_12]
snake_case_ : List[Any] = 7_68
snake_case_ : List[str] = [3, 4, 18, 3]
elif size == "b4":
snake_case_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
snake_case_ : int = 7_68
snake_case_ : Dict = [3, 8, 27, 3]
elif size == "b5":
snake_case_ : Any = [64, 1_28, 3_20, 5_12]
snake_case_ : List[str] = 7_68
snake_case_ : Optional[Any] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
snake_case_ : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_a , align=_a , do_random_crop=_a )
# prepare image
snake_case_ : Any = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=_a , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
snake_case_ : List[str] = torch.load(_a , map_location=torch.device("cpu" ) )
else:
snake_case_ : Dict = torch.load(_a , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
snake_case_ : Tuple = rename_keys(_a , encoder_only=_a )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_a , _a )
# create HuggingFace model and load state dict
if encoder_only:
snake_case_ : List[str] = False
snake_case_ : Union[str, Any] = SegformerForImageClassification(_a )
else:
snake_case_ : int = SegformerForSemanticSegmentation(_a )
model.load_state_dict(_a )
model.eval()
# forward pass
snake_case_ : int = model(_a )
snake_case_ : List[Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
snake_case_ : Dict = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
snake_case_ : Tuple = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
snake_case_ : List[Any] = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
snake_case_ : Dict = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
snake_case_ : List[str] = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
snake_case_ : Optional[Any] = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
snake_case_ : Optional[Any] = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
snake_case_ : Union[str, Any] = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
snake_case_ : Tuple = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
snake_case_ : List[Any] = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
snake_case_ : Optional[int] = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
snake_case_ : Tuple = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
snake_case_ : List[str] = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
snake_case_ : List[str] = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
snake_case_ : Optional[int] = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
snake_case_ : Optional[int] = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
image_processor.save_pretrained(_a )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowercase : int = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 568 |
def lowerCAmelCase__ ( _a : str ):
snake_case_ : List[Any] = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def lowerCAmelCase__ ( _a : str ):
snake_case_ : Optional[int] = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
snake_case_ : Optional[Any] = remove_duplicates(key.upper() )
snake_case_ : Union[str, Any] = len(_a )
# First fill cipher with key characters
snake_case_ : Union[str, Any] = {alphabet[i]: char for i, char in enumerate(_a )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_a ) , 26 ):
snake_case_ : Any = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
snake_case_ : Dict = alphabet[i - offset]
snake_case_ : Optional[Any] = char
return cipher_alphabet
def lowerCAmelCase__ ( _a : str , _a : dict[str, str] ):
return "".join(cipher_map.get(_a , _a ) for ch in message.upper() )
def lowerCAmelCase__ ( _a : str , _a : dict[str, str] ):
snake_case_ : Optional[int] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_a , _a ) for ch in message.upper() )
def lowerCAmelCase__ ( ):
snake_case_ : Optional[Any] = input("Enter message to encode or decode: " ).strip()
snake_case_ : Dict = input("Enter keyword: " ).strip()
snake_case_ : Optional[Any] = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
snake_case_ : int = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
snake_case_ : Any = create_cipher_map(_a )
print(func(_a , _a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
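# A self-contained round-trip check of the keyword-cipher idea above (this
# helper rebuilds the map with dict ordering rather than the offset walk used
# in this file, but produces the same kind of monoalphabetic substitution):
import string

def _make_map(key: str) -> dict:
    seen = dict.fromkeys(ch for ch in key.upper() if ch.isalpha())
    ordered = list(seen) + [c for c in string.ascii_uppercase if c not in seen]
    return dict(zip(string.ascii_uppercase, ordered))

_m = _make_map("GOODBYE")
_enc = "".join(_m.get(c, c) for c in "HELLO WORLD")
_rev = {v: k for k, v in _m.items()}
assert "".join(_rev.get(c, c) for c in _enc) == "HELLO WORLD"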
| 568 | 1 |
'''simple docstring'''
import math
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
__a = len(__SCREAMING_SNAKE_CASE )
__a = int(math.floor(math.sqrt(__SCREAMING_SNAKE_CASE ) ) )
__a = 0
while arr[min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - 1] < x:
__a = step
step += int(math.floor(math.sqrt(__SCREAMING_SNAKE_CASE ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__a = prev + 1
if prev == min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(',')]
SCREAMING_SNAKE_CASE_ = int(input('Enter the number to be searched:\n'))
SCREAMING_SNAKE_CASE_ = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
| 709 |
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE = 100_0000 ) -> int:
"""simple docstring"""
__a = 1
__a = 1
__a = {1: 1}
for inputa in range(2 , __SCREAMING_SNAKE_CASE ):
__a = 0
__a = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__a = (3 * number) + 1
counter += 1
if inputa not in counters:
__a = counter
if counter > pre_counter:
__a = inputa
__a = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
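# Worked example of the memoised counting above: the chain
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 contains 10 terms, so
# counters[13] == 10, and any later chain that reaches 13 adds those 10 terms
# in O(1) instead of re-walking the tail.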
| 201 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case_ ):
__magic_name__ : str = ['''pixel_values''']
def __init__( self : List[str] , lowercase__ : bool = True , lowercase__ : Dict[str, int] = None , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : bool = True , lowercase__ : Union[int, float] = 1 / 255 , lowercase__ : bool = True , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : bool = True , **lowercase__ : str , ):
'''simple docstring'''
super().__init__(**lowercase__ )
a_ : Tuple = size if size is not None else {"""height""": 384, """width""": 384}
a_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
a_ : Tuple = do_resize
a_ : Tuple = size
a_ : Optional[Any] = resample
a_ : int = do_rescale
a_ : int = rescale_factor
a_ : Optional[int] = do_normalize
a_ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
a_ : Optional[Any] = do_convert_rgb
def lowercase_ ( self : int , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Optional[int] , ):
'''simple docstring'''
a_ : Any = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
a_ : Optional[Any] = (size["""height"""], size["""width"""])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowercase_ ( self : Optional[int] , lowercase__ : np.ndarray , lowercase__ : Union[int, float] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : List[str] , ):
'''simple docstring'''
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowercase_ ( self : Tuple , lowercase__ : np.ndarray , lowercase__ : Union[float, List[float]] , lowercase__ : Union[float, List[float]] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : List[Any] , ):
'''simple docstring'''
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowercase_ ( self : List[Any] , lowercase__ : ImageInput , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Dict[str, int]] = None , lowercase__ : PILImageResampling = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[float] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : bool = None , lowercase__ : ChannelDimension = ChannelDimension.FIRST , **lowercase__ : Union[str, Any] , ):
'''simple docstring'''
a_ : List[Any] = do_resize if do_resize is not None else self.do_resize
a_ : int = resample if resample is not None else self.resample
a_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
a_ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
a_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
a_ : int = image_std if image_std is not None else self.image_std
a_ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ : List[str] = size if size is not None else self.size
a_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
a_ : Any = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ : Tuple = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
a_ : List[str] = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
a_ : int = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_rescale:
a_ : Any = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
a_ : Union[str, Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
a_ : str = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
a_ : Optional[int] = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowercase__ )
return encoded_outputs
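# The preprocessing above boils down to resize -> rescale -> normalize ->
# channels-first. A standalone numpy sketch of the same arithmetic (the
# mean/std values are the OPENAI_CLIP_MEAN / OPENAI_CLIP_STD constants):
import numpy as np

_img = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)
_img = _img * (1 / 255)                               # rescale to [0, 1]
_mean = np.array([0.48145466, 0.4578275, 0.40821073])
_std = np.array([0.26862954, 0.26130258, 0.27577711])
_img = (_img - _mean) / _std                          # per-channel normalize
_img = _img.transpose(2, 0, 1)                        # HWC -> CHW
assert _img.shape == (3, 384, 384)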
| 442 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowerCAmelCase_ : int = {'target_lang': 'fi', 'source_lang': 'en'}
lowerCAmelCase_ : str = '>>zh<<'
lowerCAmelCase_ : List[str] = 'Helsinki-NLP/'
if is_torch_available():
lowerCAmelCase_ : Dict = 'pt'
elif is_tf_available():
lowerCAmelCase_ : Union[str, Any] = 'tf'
else:
lowerCAmelCase_ : int = 'jax'
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
__magic_name__ : Dict = MarianTokenizer
__magic_name__ : Any = False
__magic_name__ : str = True
def lowercase_ ( self : Any ):
'''simple docstring'''
super().setUp()
a_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a_ : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
a_ : List[str] = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple , **lowercase__ : int ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase_ ( self : int , lowercase__ : int ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : List[str] ):
'''simple docstring'''
a_ : Optional[int] = """</s>"""
a_ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
a_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : str = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
a_ : Any = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
a_ : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
a_ : Union[str, Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
a_ : Union[str, Any] = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : int = self.get_tokenizer()
a_ : Dict = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
a_ : List[str] = self.get_tokenizer()
a_ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
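# fmt: off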
a_ : Optional[int] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowercase_ ( self : int ):
'''simple docstring'''
a_ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a_ : Tuple = """Tämä on testi"""
a_ : Union[str, Any] = """This is a test"""
a_ : Union[str, Any] = [76, 7, 2047, 2]
a_ : Optional[int] = [69, 12, 11, 940, 2]
a_ : Optional[Any] = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : Optional[int] = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : str = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 442 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase ( __snake_case ):
a: torch.FloatTensor
a: Optional[torch.FloatTensor] = None
def __snake_case ( _UpperCamelCase , _UpperCamelCase=0.9_99 , _UpperCamelCase="cosine" , ) -> List[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_a = []
for i in range(_UpperCamelCase ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ) , _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase , dtype=torch.floataa )
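# A quick numerical sanity check of the cosine schedule above (a standalone
# sketch using the same alpha_bar definition): the cumulative product of the
# alphas recovered from these betas should decrease monotonically from ~1.
import math
import torch

_T = 1000
_f = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
_betas = torch.tensor([min(1 - _f((i + 1) / _T) / _f(i / _T), 0.999) for i in range(_T)])
_alphas_cumprod = torch.cumprod(1.0 - _betas, dim=0)
assert bool((_alphas_cumprod[1:] <= _alphas_cumprod[:-1]).all())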
class UpperCAmelCase ( __snake_case , __snake_case ):
a: Any = 1
@register_to_config
def __init__( self: str , __UpperCamelCase: int = 1000 , __UpperCamelCase: float = 0.0_0_0_1 , __UpperCamelCase: float = 0.0_2 , __UpperCamelCase: str = "linear" , __UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase: bool = True , __UpperCamelCase: bool = True , __UpperCamelCase: int = 0 , __UpperCamelCase: str = "epsilon" , __UpperCamelCase: float = 1.0 , **__UpperCamelCase: Any , ):
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_a = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_a = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_a = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_a = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_a = 1.0
# setable values
_a = None
_a = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def _A ( self: List[str] , __UpperCamelCase: torch.FloatTensor , __UpperCamelCase: Optional[int] = None ):
return sample
def _A ( self: Dict , __UpperCamelCase: int , __UpperCamelCase: Union[str, torch.device] = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
f" maximal {self.config.num_train_timesteps} timesteps." )
_a = num_inference_steps
_a = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_a = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def _A ( self: Any , __UpperCamelCase: torch.FloatTensor , __UpperCamelCase: int , __UpperCamelCase: torch.FloatTensor , __UpperCamelCase: float = 0.0 , __UpperCamelCase: bool = False , __UpperCamelCase: Optional[torch.FloatTensor] = None , __UpperCamelCase: bool = True , ):
# 1. get previous step value (=t+1)
_a = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_a = self.alphas_cumprod[timestep]
_a = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_a = model_output
elif self.config.prediction_type == "sample":
_a = model_output
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_a = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_a = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self: str ):
return self.config.num_train_timesteps
| 346 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCamelCase ) -> bytes:
if len(_UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __snake_case ( _UpperCamelCase ) -> bytes:
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''08x''' )[-8:]
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __snake_case ( _UpperCamelCase ) -> bytes:
_a = b''''''
for char in message:
bit_string += format(_UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(_UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCamelCase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCamelCase ) -> Generator[list[int], None, None]:
if len(_UpperCamelCase ) % 5_12 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCamelCase ) , 5_12 ):
_a = bit_string[pos : pos + 5_12]
_a = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCamelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCamelCase , 2 )
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return (a + b) % 2**32
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __snake_case ( _UpperCamelCase ) -> bytes:
_a = preprocess(_UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67_45_23_01
_a = 0XEF_CD_AB_89
_a = 0X98_BA_DC_FE
_a = 0X10_32_54_76
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(_UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(_UpperCamelCase , left_rotate_aa(_UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
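# A sanity check one could run against the standard library, assuming the
# digest function above is importable under some name (md5_me below is an
# assumed name for illustration, not defined in this file):
#
#     import hashlib
#     assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")
#     assert md5_me(b"abc") == hashlib.md5(b"abc").hexdigest().encode("utf-8")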
| 346 | 1 |
'''simple docstring'''
import numpy as np
def a_ ( _UpperCAmelCase : np.ndarray ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def a_ ( _UpperCAmelCase : np.ndarray ) -> np.ndarray:
return vector * sigmoid(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
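# A small numeric check of the swish/SiLU defined above: swish(0) == 0 and,
# for large positive x, swish(x) approaches x (standalone sketch).
import numpy as np

_x = np.array([-10.0, 0.0, 10.0])
_s = _x * (1 / (1 + np.exp(-_x)))
assert _s[1] == 0.0
assert abs(_s[2] - 10.0) < 1e-3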
| 286 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = WavaVecaPhonemeCTCTokenizer
A__ = False
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
__snake_case : Optional[Any] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
__snake_case : Tuple = dict(zip(__a , range(len(__a ) ) ) )
__snake_case : List[str] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
__snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
def A_ ( self : Tuple , __a : Any , __a : str=False , __a : Tuple=20 , __a : int=5 ) -> Tuple[str, list]:
'''simple docstring'''
__snake_case : Any = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__a )) for i in range(len(__a ) )]
__snake_case : Optional[int] = list(filter(lambda __a : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__a ) , __a ) )
if max_length is not None and len(__a ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(__a ) < min_length and len(__a ) > 0:
while len(__a ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
if " " not in output_txt and len(__a ) > 1:
__snake_case : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__a )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__a )
)
if with_prefix_space:
__snake_case : Tuple = ' ' + output_txt
__snake_case : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
return output_txt, output_ids
def A_ ( self : Union[str, Any] , **__a : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
__snake_case : Optional[Any] = tokenizer('m xxx ɪ' , do_phonemize=__a ).input_ids
self.assertEqual(__a , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
__snake_case : Union[str, Any] = tokenizer('m aaa ɪ ccc' , do_phonemize=__a ).input_ids
self.assertEqual(__a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__snake_case : Dict = tokenizer('maɪ c' , do_phonemize=__a ).input_ids
self.assertEqual(__a , [3, 200] ) # mai should be <unk> (=3)
def A_ ( self : Any ) -> str:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : List[str] = 'Hello how are you'
__snake_case : Dict = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(__a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Optional[Any] = 'Hello how are you'
__snake_case : List[str] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
def A_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Tuple = 'Hello how are you'
__snake_case : Tuple = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
__snake_case : str = tokenizer.decode(tokenizer(__a ).input_ids )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__snake_case : Tuple = tokenizer.decode(sample_ids[0] )
__snake_case : str = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Optional[Any] = 'Hello how are you'
__snake_case : Union[str, Any] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(__a , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Any = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Tuple = 'Hello how are you'
__snake_case : List[str] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
__snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__snake_case : Dict = tokenizer.decode(sample_ids[0] )
__snake_case : Tuple = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
__snake_case : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__a )
__snake_case : Optional[int] = tokenizer.batch_decode(__a , filter_word_delimiter_token=__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Any = 'Hello how are you'
__snake_case : Optional[int] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
__snake_case : Union[str, Any] = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(__a , __a )
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Optional[int] = 'Hello how are you'
__snake_case : List[Any] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
__snake_case : Union[str, Any] = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , __a )
def A_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=__a )
__snake_case : Any = 'Hello how are you'
__snake_case : Union[str, Any] = tokenizer(__a , phonemizer_lang='en-us' ).input_ids
__snake_case : Union[str, Any] = tokenizer(__a , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(__a , __a )
__snake_case : str = tokenizer.decode(__a )
__snake_case : int = tokenizer.decode(__a )
self.assertEqual(__a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(__a , 'ɛ l o h aʊ a ʁ j u' )
def A_ ( self : str ) -> str:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : List[str] = 'Hello how Are you'
__snake_case : Optional[Any] = 'hello how are you'
__snake_case : Union[str, Any] = tokenizer(__a ).input_ids
__snake_case : Any = tokenizer(__a ).input_ids
self.assertEqual(__a , __a )
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
__snake_case : Tuple = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
__snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__snake_case : str = tokenizer.batch_decode(__a )
self.assertEqual(__a , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def A_ ( __a : Any , __a : Dict ) -> Tuple:
'''simple docstring'''
__snake_case : str = [d[key] for d in offsets]
return retrieved_list
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[int] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__snake_case : int = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__snake_case : Any = tokenizer.decode(__a , output_char_offsets=__a , filter_word_delimiter_token=__a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(__a , __a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(__a : int , __a : Union[str, Any] ):
self.assertTrue(isinstance(__a , __a ) )
self.assertTrue(isinstance(outputs_list[0] , __a ) )
# transform list to ModelOutput
__snake_case : Optional[int] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(__a : Any , __a : str ):
if isinstance(__a , __a ):
[recursive_check(__a , __a ) for la, la in zip(__a , __a )]
self.assertEqual(__a , __a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
__snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # that the output type is correct and that the output is identical to `decode`.
# char
__snake_case : List[str] = tokenizer.batch_decode(__a , output_char_offsets=__a )
__snake_case : str = [tokenizer.decode(__a , output_char_offsets=__a ) for ids in sample_ids]
check_list_tuples_equal(__a , __a )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def A_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
__snake_case : int = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : int = tokenizer.vocab_size
__snake_case : List[Any] = len(__a )
self.assertNotEqual(__a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__snake_case : Optional[Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
__snake_case : Optional[int] = tokenizer.add_tokens(__a )
__snake_case : Optional[int] = tokenizer.vocab_size
__snake_case : Tuple = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size + len(__a ) )
__snake_case : Optional[int] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__snake_case : Tuple = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__snake_case : Optional[Any] = tokenizer.add_special_tokens(__a )
__snake_case : int = tokenizer.vocab_size
__snake_case : Any = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size_a + len(__a ) )
__snake_case : List[str] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def A_ ( self : str ) -> Any:
'''simple docstring'''
pass
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__snake_case : Optional[int] = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Union[str, Any] = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
__snake_case : List[Any] = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(output['text'] , __a )
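# --- Hedged usage sketch (added; not part of the test suite). Upstream the
# class is exported as Wav2Vec2PhonemeCTCTokenizer (this dump spells it
# WavaVecaPhonemeCTCTokenizer). The demo mirrors the expectation asserted in
# the tests above and needs the `phonemizer` package plus hub access, so it is
# left as an uncalled helper:
def _phonemizer_demo() -> None:
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
    phonemes = tokenizer.phonemize('Hello how are you', phonemizer_lang='en-us')
    assert phonemes == 'h ə l oʊ h aʊ ɑːɹ j uː'  # same value the tests assert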
| 286 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''')
class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length, max_position_embeddings=None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
                f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
                '''exceptions, performance degradation, or nothing at all.''' )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length, max_new_tokens):
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            '''with `max_length = start_length + max_new_tokens` instead.''' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time, initial_timestamp=None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)
    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
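# --- Hedged usage sketch (added): a StoppingCriteriaList fires as soon as any
# member criterion returns True; here the length criterion is already met.
def _stopping_criteria_demo() -> None:
    input_ids = torch.ones((1, 8), dtype=torch.long)  # pretend 8 tokens were generated
    scores = torch.zeros((1, 100))
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=30.0)])
    assert criteria(input_ids, scores)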
| 703 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase__ : Union[str, Any] = deepcopy(__UpperCAmelCase )
elif os.path.exists(__UpperCAmelCase ):
with io.open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ : int = json.load(__UpperCAmelCase )
else:
try:
                lowerCAmelCase__ : Union[str, Any] = base64.urlsafe_b64decode(__UpperCAmelCase ).decode('''utf-8''' )
lowerCAmelCase__ : Optional[int] = json.loads(__UpperCAmelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase__ : Optional[Any] = config
self.set_stage_and_offload()
def __magic_name__( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase__ : Optional[Any] = self.get_value('''zero_optimization.stage''' , -1 )
# offload
lowerCAmelCase__ : Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase__ : Dict = set(['''cpu''', '''nvme'''] )
lowerCAmelCase__ : Optional[Any] = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase__ : List[Any] = True
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : int = self.config
# find the config node of interest if it exists
lowerCAmelCase__ : Tuple = ds_key_long.split('''.''' )
lowerCAmelCase__ : int = nodes.pop()
for node in nodes:
lowerCAmelCase__ : int = config.get(__UpperCAmelCase )
if config is None:
return None, ds_key
return config, ds_key
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=None ):
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.find_config_node(__UpperCAmelCase )
if config is None:
return default
return config.get(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : Dict = self.config
# find the config node of interest if it exists
lowerCAmelCase__ : str = ds_key_long.split('''.''' )
for node in nodes:
lowerCAmelCase__ : Dict = config
lowerCAmelCase__ : List[str] = config.get(__UpperCAmelCase )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = self.get_value(__UpperCAmelCase )
return False if value is None else bool(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.get_value(__UpperCAmelCase )
return False if value is None else not bool(__UpperCAmelCase )
def __magic_name__( self ):
return self._stage == 2
def __magic_name__( self ):
return self._stage == 3
def __magic_name__( self ):
return self._offload
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
lowerCAmelCase__ : int = engine
def __magic_name__( self , __UpperCAmelCase , **__UpperCAmelCase ):
# runs backpropagation and handles mixed precision
self.engine.backward(__UpperCAmelCase , **__UpperCAmelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _lowerCAmelCase ( AcceleratedOptimizer ):
def __init__( self , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase , device_placement=__UpperCAmelCase , scaler=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = hasattr(self.optimizer , '''overflow''' )
def __magic_name__( self , __UpperCAmelCase=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __magic_name__( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __magic_name__( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _lowerCAmelCase ( AcceleratedScheduler ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=0.001 , __UpperCAmelCase=0 , **__UpperCAmelCase ):
lowerCAmelCase__ : Any = params
lowerCAmelCase__ : str = lr
lowerCAmelCase__ : List[str] = weight_decay
lowerCAmelCase__ : Dict = kwargs
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=0 , **__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = optimizer
lowerCAmelCase__ : Optional[Any] = total_num_steps
lowerCAmelCase__ : str = warmup_num_steps
lowerCAmelCase__ : Dict = kwargs
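# --- Standalone sketch (added) of the dotted-key lookup technique that
# find_config_node/get_value above implement; all names here are illustrative.
def _dotted_lookup_demo() -> None:
    config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}

    def get_value(cfg, ds_key_long, default=None):
        *nodes, leaf = ds_key_long.split(".")
        for node in nodes:
            cfg = cfg.get(node)
            if cfg is None:
                return default
        return cfg.get(leaf, default)

    assert get_value(config, "zero_optimization.stage") == 3
    assert get_value(config, "zero_optimization.offload_param.device") == "cpu"
    assert get_value(config, "optimizer.params.lr", default=1e-3) == 1e-3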
| 470 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_speech_to_text'''] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_to_text'''] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
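# --- Standalone sketch (added) of the lazy-import idea used above: build a
# name -> submodule map at import time and pay the real import cost only when
# an attribute is first resolved (illustrative, not the library's code).
def _lazy_import_demo() -> None:
    import importlib

    class LazyAttr:
        def __init__(self, module_name, attr):
            self._module_name, self._attr = module_name, attr

        def resolve(self):
            # The underlying module is imported only here, on first use.
            return getattr(importlib.import_module(self._module_name), self._attr)

    lazy_sqrt = LazyAttr("math", "sqrt")
    assert lazy_sqrt.resolve()(9.0) == 3.0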
| 594 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self , __a = None , __a = None , __a = True , __a = False , __a = None , __a = None , __a = 0 , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__lowerCamelCase : List[Any] = self.tokenizer
__lowerCamelCase : List[str] = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
return text_encoding
# add pixel_values
__lowerCamelCase : Any = self.image_processor(__a , return_tensors=__a )
if text is not None:
__lowerCamelCase : Tuple = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
else:
__lowerCamelCase : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(__a )
return encoding_image_processor
def snake_case_ ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case_ ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case_ ( self ):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
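# --- Hedged usage sketch (added; upstream name: BlipProcessor). The
# checkpoint name is only an example, and hub access is needed, so the demo
# is left as an uncalled helper:
def _blip_processor_demo() -> None:
    from PIL import Image
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
    inputs = processor(images=Image.new('RGB', (224, 224)), text='a photo of', return_tensors='pt')
    # Image+text calls return pixel_values plus the tokenizer outputs:
    assert {'pixel_values', 'input_ids', 'attention_mask'} <= set(inputs.keys())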
| 594 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
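# --- Standalone sketch (added) of the separate-chaining idea the subclass
# above implements: each slot holds a deque and new values are pushed to the
# front (illustrative; the real class inherits sizing/probing from HashTable).
def _chaining_demo() -> None:
    size = 4
    slots = [deque() for _ in range(size)]
    for value in (10, 14, 7):
        slots[value % size].appendleft(value)
    assert list(slots[2]) == [14, 10]  # 10 and 14 collide in slot 2; newest first
| 707 |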
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
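# --- Hedged usage sketch (added). Outside this obfuscated dump the classes
# are exported as ConvNextV2ForImageClassification / AutoImageProcessor; the
# flow matches the slow test above and needs torch plus hub access, so it is
# left as an uncalled helper:
def _convnextv2_inference_demo() -> None:
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    model = ConvNextV2ForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224')
    processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
    inputs = processor(images=prepare_img(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    assert logits.shape == (1, 1_000)  # ImageNet-1k classification head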
| 2 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCamelCase (PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=None , **_UpperCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenizer_file=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
UpperCAmelCase_ : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = len(self.sp_model )
UpperCAmelCase_ : Optional[int] = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
}
UpperCAmelCase_ : Dict = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : List[str] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : Optional[int] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase_ : Dict = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __UpperCAmelCase ( self ) -> List[str]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCAmelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
                token_ids_0=_UpperCamelCase , token_ids_1=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : int = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCamelCase )) + ([0] * len(_UpperCamelCase )) + suffix_ones
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCAmelCase_ : Any = src_lang
UpperCAmelCase_ : int = self(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = self.convert_tokens_to_ids(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Tuple = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : List[Any] = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
        out_string = ''.join(_UpperCamelCase ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : List[str] = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
UpperCAmelCase_ : int = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ) -> BatchEncoding:
UpperCAmelCase_ : List[str] = src_lang
UpperCAmelCase_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
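# --- Hedged usage sketch (added; upstream name: MBartTokenizer). With
# src_lang set, encoded sequences are suffixed with </s> followed by the
# language code, exactly as set_src_lang_special_tokens above arranges. Needs
# hub access, so it is left as an uncalled helper:
def _mbart_tokenizer_demo() -> None:
    from transformers import MBartTokenizer

    tok = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
    batch = tok('UN Chief Says There Is No Plan to Stop War', return_tensors='pt')
    assert batch['input_ids'][0, -1].item() == tok.lang_code_to_id['en_XX']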
| 406 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
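# --- Hedged usage sketch (added) of what the lazily-exported classes are for:
# table detection as DETR-style object detection. The checkpoint name is an
# assumption (a commonly used one); needs torch plus hub access, so the demo
# is left as an uncalled helper:
def _table_transformer_demo() -> None:
    import torch
    from transformers import TableTransformerForObjectDetection

    model = TableTransformerForObjectDetection.from_pretrained('microsoft/table-transformer-detection')
    pixel_values = torch.rand(1, 3, 800, 800)  # stand-in for a preprocessed page image
    with torch.no_grad():
        outputs = model(pixel_values)
    # DETR-style heads: per-query class logits and normalized boxes.
    print(outputs.logits.shape, outputs.pred_boxes.shape)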
| 406 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__( self , other):
        if not isinstance(other , Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input( self , text , overwrite = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'User input added while an unprocessed input already existed: "{self.new_user_input}" was overwritten '
                    F'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    F'User input added while an unprocessed input already existed: "{self.new_user_input}"; new input '
                    F'ignored: "{text}". Set `overwrite` to True to overwrite the unprocessed user input.')
        else:
            self.new_user_input = text
    def mark_processed( self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response( self , response):
        self.generated_responses.append(response)
    def iter_texts( self):
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__( self):
        output = F'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """, )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
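# Illustrative usage sketch (the `conversational_pipeline` instance below is
# assumed to be created elsewhere, e.g. via `pipeline("conversational")`):
#
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = conversational_pipeline(conversation)  # fills in a bot response
#   conversation.add_user_input("Is it an action movie?")
#   conversation = conversational_pipeline(conversation)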
| 92 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a_ = TypeVar("T")
a_ = TypeVar("U")
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self , lowercase_ , lowercase_):
snake_case_ : Any = key
snake_case_ : List[Any] = val
snake_case_ : DoubleLinkedListNode[T, U] | None = None
snake_case_ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
)
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self):
snake_case_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ , lowercase_)
snake_case_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ , lowercase_)
snake_case_ , snake_case_ : Union[str, Any] = self.rear, self.head
def __repr__( self):
snake_case_ : Dict = ["DoubleLinkedList"]
snake_case_ : Dict = self.head
while node.next is not None:
rep.append(str(lowercase_))
snake_case_ : List[str] = node.next
rep.append(str(self.rear))
return ",\n ".join(lowercase_)
def snake_case__ ( self , lowercase_):
snake_case_ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
snake_case_ : Tuple = node
snake_case_ : str = previous
snake_case_ : Optional[Any] = node
snake_case_ : Any = self.rear
def snake_case__ ( self , lowercase_):
if node.prev is None or node.next is None:
return None
snake_case_ : Union[str, Any] = node.next
snake_case_ : Optional[int] = node.prev
snake_case_ : str = None
snake_case_ : int = None
return node
class UpperCAmelCase_ ( Generic[T, U] ):
UpperCAmelCase_ = {}
def __init__( self , lowercase_):
snake_case_ : DoubleLinkedList[T, U] = DoubleLinkedList()
snake_case_ : List[str] = capacity
snake_case_ : Any = 0
snake_case_ : Dict = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , lowercase_):
return key in self.cache
def snake_case__ ( self , lowercase_):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
snake_case_ : DoubleLinkedListNode[T, U] = self.cache[key]
snake_case_ : Optional[Any] = self.list.remove(self.cache[key])
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_)
return node.val
self.miss += 1
return None
def snake_case__ ( self , lowercase_ , lowercase_):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
snake_case_ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
snake_case_ : Dict = DoubleLinkedListNode(lowercase_ , lowercase_)
self.list.add(self.cache[key])
self.num_keys += 1
else:
# bump node to the end of the list, update value
snake_case_ : List[Any] = self.list.remove(self.cache[key])
assert node is not None # node guaranteed to be in list
snake_case_ : Optional[Any] = value
self.list.add(lowercase_)
@classmethod
def snake_case__ ( cls , lowercase_ = 1_28):
def cache_decorator_inner(lowercase_) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase_) -> U:
if func not in cls.decorator_function_to_instance_map:
snake_case_ : List[str] = LRUCache(lowercase_)
snake_case_ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0])
if result is None:
snake_case_ : Any = func(*lowercase_)
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase_)
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_ , "cache_info" , lowercase_) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
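    # Quick usage sketch of the decorator interface; `fib` is an example
    # function added for illustration, not part of the original module.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(100))
    print(fib.cache_info())  # type: ignore[attr-defined]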
| 92 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 475 | import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__A = DDIMPipeline
__A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__A = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
__A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__A = False
def __UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
        _UpperCamelCase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCamelCase = DDIMScheduler()
_UpperCamelCase = {"unet": unet, "scheduler": scheduler}
return components
def __UpperCAmelCase ( self : int , lowercase_ : List[str] , lowercase_ : Optional[Any]=0) -> Optional[Any]:
"""simple docstring"""
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3))
_UpperCamelCase = np.array(
[1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4])
_UpperCamelCase = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowercase_ , 1e-3)
def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3)
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3)
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = "google/ddpm-cifar10-32"
        _UpperCamelCase = UNet2DModel.from_pretrained(lowercase_)
_UpperCamelCase = DDIMScheduler()
_UpperCamelCase = DDIMPipeline(unet=lowercase_ , scheduler=lowercase_)
ddim.to(lowercase_)
ddim.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = ddim(generator=lowercase_ , eta=0.0 , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ddpm-ema-bedroom-256"
        _UpperCamelCase = UNet2DModel.from_pretrained(lowercase_)
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_)
_UpperCamelCase = DDIMPipeline(unet=lowercase_ , scheduler=lowercase_)
ddpm.to(lowercase_)
ddpm.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = ddpm(generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 547 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ) -> Optional[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ =size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =num_channels
snake_case__ =min_resolution
snake_case__ =max_resolution
snake_case__ =do_resize
snake_case__ =size
snake_case__ =do_normalize
snake_case__ =image_mean
snake_case__ =image_std
snake_case__ =do_rescale
snake_case__ =rescale_factor
snake_case__ =do_pad
def _lowercase ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase=False ) -> int:
if not batched:
snake_case__ =image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
snake_case__ , snake_case__ =image.size
else:
snake_case__ , snake_case__ =image.shape[1], image.shape[2]
if w < h:
snake_case__ =int(self.size['shortest_edge'] * h / w )
snake_case__ =self.size['shortest_edge']
elif w > h:
snake_case__ =self.size['shortest_edge']
snake_case__ =int(self.size['shortest_edge'] * w / h )
else:
snake_case__ =self.size['shortest_edge']
snake_case__ =self.size['shortest_edge']
else:
snake_case__ =[]
for image in image_inputs:
snake_case__ , snake_case__ =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ =max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
snake_case__ =max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
a_ : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
snake_case__ =ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[Any]:
snake_case__ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
def _lowercase ( self ) -> Any:
snake_case__ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
snake_case__ =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def _lowercase ( self ) -> str:
pass
def _lowercase ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
snake_case__ =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
snake_case__ =image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Any:
# Initialize image_processing
snake_case__ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
snake_case__ =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ =image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Optional[int]:
# Initialize image_processing
snake_case__ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
snake_case__ =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ =image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ =self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self ) -> Any:
# prepare image and target
snake_case__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case__ =json.loads(f.read() )
snake_case__ ={'image_id': 3_9769, 'annotations': target}
# encode them
snake_case__ =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
snake_case__ =image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt' )
# verify pixel values
snake_case__ =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase )
snake_case__ =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
snake_case__ =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase ) )
# verify boxes
snake_case__ =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase )
snake_case__ =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
snake_case__ =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase ) )
# verify is_crowd
snake_case__ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase ) )
# verify class_labels
snake_case__ =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase ) )
# verify orig_size
snake_case__ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase ) )
# verify size
snake_case__ =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase ) )
@slow
def _lowercase ( self ) -> Optional[int]:
# prepare image, target and masks_path
snake_case__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case__ =json.loads(f.read() )
snake_case__ ={'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
snake_case__ =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case__ =ConditionalDetrImageProcessor(format='coco_panoptic' )
snake_case__ =image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt' )
# verify pixel values
snake_case__ =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase )
snake_case__ =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
snake_case__ =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase ) )
# verify boxes
snake_case__ =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase )
snake_case__ =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
snake_case__ =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase ) )
# verify is_crowd
snake_case__ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase ) )
# verify class_labels
snake_case__ =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase ) )
# verify masks
snake_case__ =82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase )
# verify orig_size
snake_case__ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase ) )
# verify size
snake_case__ =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase ) )
| 718 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class Pix2StructTextConfig( PretrainedConfig ):
a_ : Dict = '''pix2struct_text_model'''
a_ : Optional[int] = ['''past_key_values''']
a_ : int = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _UpperCAmelCase=5_0244 , _UpperCAmelCase=768 , _UpperCAmelCase=64 , _UpperCAmelCase=2048 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1E-6 , _UpperCAmelCase=1.0 , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0 , _UpperCAmelCase=False , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> int:
snake_case__ =vocab_size
snake_case__ =hidden_size
snake_case__ =d_kv
snake_case__ =d_ff
snake_case__ =num_layers
snake_case__ =num_heads
snake_case__ =relative_attention_num_buckets
snake_case__ =relative_attention_max_distance
snake_case__ =dropout_rate
snake_case__ =layer_norm_epsilon
snake_case__ =initializer_factor
snake_case__ =use_cache
snake_case__ =eos_token_id
snake_case__ =decoder_start_token_id
# for backwards compatibility
snake_case__ =dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , is_decoder=_UpperCAmelCase , **_UpperCAmelCase , )
@classmethod
def _lowercase ( cls , _UpperCAmelCase , **_UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCAmelCase )
snake_case__ , snake_case__ =cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
snake_case__ =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class Pix2StructVisionConfig( PretrainedConfig ):
a_ : List[Any] = '''pix2struct_vision_model'''
def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=768 , _UpperCAmelCase=2048 , _UpperCAmelCase=64 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=1.0 , _UpperCAmelCase=4096 , _UpperCAmelCase=32 , _UpperCAmelCase=128 , **_UpperCAmelCase , ) -> int:
super().__init__(**_UpperCAmelCase )
snake_case__ =hidden_size
snake_case__ =patch_embed_hidden_size
snake_case__ =d_ff
snake_case__ =dropout_rate
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =initializer_range
snake_case__ =initializer_factor
snake_case__ =attention_dropout
snake_case__ =layer_norm_eps
snake_case__ =dense_act_fn
snake_case__ =seq_len
snake_case__ =relative_attention_num_buckets
snake_case__ =relative_attention_max_distance
snake_case__ =d_kv
@classmethod
def _lowercase ( cls , _UpperCAmelCase , **_UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCAmelCase )
snake_case__ , snake_case__ =cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
snake_case__ =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class Pix2StructConfig( PretrainedConfig ):
a_ : Dict = '''pix2struct'''
a_ : Optional[int] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> int:
super().__init__(tie_word_embeddings=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
if text_config is None:
snake_case__ ={}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
snake_case__ ={}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        self.text_config = Pix2StructTextConfig(**_UpperCAmelCase )
        self.vision_config = Pix2StructVisionConfig(**_UpperCAmelCase )
snake_case__ =self.text_config.decoder_start_token_id
snake_case__ =self.text_config.pad_token_id
snake_case__ =self.text_config.eos_token_id
snake_case__ =initializer_factor
snake_case__ =initializer_range
snake_case__ =self.initializer_range
snake_case__ =self.initializer_range
snake_case__ =is_vqa
@classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs) -> Any:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =copy.deepcopy(self.__dict__ )
snake_case__ =self.text_config.to_dict()
snake_case__ =self.vision_config.to_dict()
snake_case__ =self.__class__.model_type
return output
| 581 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
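# Worked numeric example (illustrative addition, independent of the metric class
# below): perplexity is the exponentiated mean negative log-likelihood, so three
# tokens predicted with probabilities 0.5, 0.25 and 0.125 give
#
#     PPL = exp(-(ln 0.5 + ln 0.25 + ln 0.125) / 3) = exp(ln 4) = 4.0
#
#     probs = np.array([0.5, 0.25, 0.125])
#     assert np.isclose(np.exp(-np.log(probs).mean()), 4.0)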
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 16 , _UpperCAmelCase = True , _UpperCAmelCase=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__a : str = '''cuda'''
else:
__a : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__a : Any = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
__a : Union[str, Any] = model.to(_UpperCAmelCase )
__a : str = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__a : Optional[int] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__a : List[str] = model.config.max_length - 1
else:
__a : Optional[int] = model.config.max_length
__a : int = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''pt''' , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
__a : List[Any] = encodings['''input_ids''']
__a : Tuple = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__a : Any = []
__a : int = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
__a : List[Any] = min(start_index + batch_size , len(_UpperCAmelCase ) )
__a : Optional[int] = encoded_texts[start_index:end_index]
__a : Dict = attn_masks[start_index:end_index]
if add_start_token:
__a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
__a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__a : Tuple = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
__a : List[str] = encoded_batch
with torch.no_grad():
__a : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
__a : Optional[int] = out_logits[..., :-1, :].contiguous()
__a : Tuple = labels[..., 1:].contiguous()
__a : Tuple = attn_mask[..., 1:].contiguous()
            __a : int = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )} | 52 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = DiTPipeline
lowerCamelCase__ : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCamelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowerCamelCase__ : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__ : Optional[Any] = False
def _UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
        lowercase__ : Dict = Transformer2DModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=a , )
lowercase__ : int = AutoencoderKL()
lowercase__ : Dict = DDIMScheduler()
lowercase__ : List[str] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def _UpperCAmelCase ( self , a , a=0 ) -> Dict:
if str(a ).startswith('mps' ):
lowercase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowercase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowercase__ : Optional[int] = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = 'cpu'
lowercase__ : Any = self.get_dummy_components()
lowercase__ : List[Any] = self.pipeline_class(**a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = self.get_dummy_inputs(a )
lowercase__ : List[str] = pipe(**a ).images
lowercase__ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
lowercase__ : Any = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a , 1e-3 )
def _UpperCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCAmelCase ( self ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
lowercase__ : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
lowercase__ : Optional[int] = pipe.get_label_ids(a )
lowercase__ : Optional[int] = pipe(a , generator=a , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(a , a ):
lowercase__ : Tuple = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
lowercase__ : Tuple = ['vase', 'umbrella']
lowercase__ : List[str] = pipe.get_label_ids(a )
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : Optional[Any] = pipe(a , generator=a , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(a , a ):
lowercase__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 599 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""ChineseCLIPFeatureExtractor"""]
__SCREAMING_SNAKE_CASE = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 721 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 17 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
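    # Illustrative checks (example values added for clarity): both variants
    # agree on the L1 distance.
    assert manhattan_distance([1, 1, 1], [0, 0, 0]) == 3.0
    assert manhattan_distance_one_liner([1, 4, 2], [3, 0, -2]) == 10.0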
| 468 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 468 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar("""T""")
class lowerCamelCase ( Generic[T] ):
def __init__( self , lowercase__):
__UpperCAmelCase : Optional[int] = data
__UpperCAmelCase : str = self
__UpperCAmelCase : Optional[Any] = 0
class lowerCamelCase ( Generic[T] ):
def __init__( self):
# map from node name to the node object
__UpperCAmelCase : dict[T, DisjointSetTreeNode[T]] = {}
def A( self , lowercase__):
# create a new set with x as its member
__UpperCAmelCase : Union[str, Any] = DisjointSetTreeNode(lowercase__)
def A( self , lowercase__):
# find the set x belongs to (with path-compression)
__UpperCAmelCase : Optional[int] = self.map[data]
if elem_ref != elem_ref.parent:
__UpperCAmelCase : Any = self.find_set(elem_ref.parent.data)
return elem_ref.parent
def A( self , lowercase__ , lowercase__):
# helper function for union operation
if nodea.rank > nodea.rank:
__UpperCAmelCase : Dict = nodea
else:
__UpperCAmelCase : Tuple = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def A( self , lowercase__ , lowercase__):
# merge 2 disjoint sets
self.link(self.find_set(lowercase__) , self.find_set(lowercase__))
class lowerCamelCase ( Generic[T] ):
def __init__( self):
# connections: map from the node to the neighbouring nodes (with weights)
__UpperCAmelCase : dict[T, dict[T, int]] = {}
def A( self , lowercase__):
# add a node ONLY if its not present in the graph
if node not in self.connections:
__UpperCAmelCase : str = {}
def A( self , lowercase__ , lowercase__ , lowercase__):
# add an edge with the given weight
self.add_node(lowercase__)
self.add_node(lowercase__)
__UpperCAmelCase : Tuple = weight
__UpperCAmelCase : Dict = weight
def A( self):
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : List[Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start))
edges.append((start, end, self.connections[start][end]))
edges.sort(key=lambda lowercase__: x[2])
# creating the disjoint set
__UpperCAmelCase : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowercase__)
# MST generation
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Tuple = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections) - 1:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = edges[index]
index += 1
__UpperCAmelCase : List[str] = disjoint_set.find_set(lowercase__)
__UpperCAmelCase : int = disjoint_set.find_set(lowercase__)
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowercase__ , lowercase__ , lowercase__)
disjoint_set.union(lowercase__ , lowercase__)
return graph
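

if __name__ == "__main__":
    # Minimal usage sketch (node labels are arbitrary example values): the MST of
    # this triangle keeps the two cheapest edges and drops the weight-10 edge.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}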
| 675 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
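    # Illustrative checks (example inputs added for clarity): {1, 5} beats {2},
    # and a single large element can beat any non-adjacent pair.
    assert maximum_non_adjacent_sum([1, 2, 5]) == 6
    assert maximum_non_adjacent_sum([499, 500, -3, -7]) == 500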
| 675 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowercase__ = -1
lowercase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowercase__ = model.generate(UpperCamelCase_ , max_new_tokens=10 , do_sample=UpperCamelCase_ )
lowercase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ = TextStreamer(UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=10 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowercase__ = -1
lowercase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowercase__ = model.generate(UpperCamelCase_ , max_new_tokens=10 , do_sample=UpperCamelCase_ )
lowercase__ = tokenizer.decode(greedy_ids[0] )
lowercase__ = TextIteratorStreamer(UpperCamelCase_ )
lowercase__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowercase__ = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
lowercase__ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowercase__ = -1
lowercase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowercase__ = model.generate(UpperCamelCase_ , max_new_tokens=10 , do_sample=UpperCamelCase_ )
lowercase__ = greedy_ids[:, input_ids.shape[1] :]
lowercase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=10 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowercase__ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ )
lowercase__ = -1
lowercase__ = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ = cs.out[:-1] # Remove the final "\n"
lowercase__ = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowercase__ = -1
lowercase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowercase__ = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 )
lowercase__ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowercase__ = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = ''''''
for new_text in streamer:
streamer_text += new_text
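    # A minimal sketch (not part of the test suite) of the producer/consumer
    # pattern these tests exercise: `generate` runs in a background thread and
    # pushes decoded text into the streamer, which the main thread iterates.
    # The checkpoint and prompt below are illustrative placeholders.
    #
    #     from threading import Thread
    #     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
    #
    #     tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    #     model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    #     inputs = tok("Hello", return_tensors="pt")
    #     streamer = TextIteratorStreamer(tok, skip_prompt=True)
    #     Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    #     text = "".join(streamer)  # blocks until generation finishes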
| 43 |
"""simple docstring"""
alphabet_size = 2_56
# Modulus to hash a string
modulus = 1_00_00_03
def rabin_karp(pattern: str , text: str ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp():
    """simple docstring"""
    # Test 1)
    pattern = """abc1abc12"""
    text_match = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text_no_match = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text_match ) and not rabin_karp(pattern , text_no_match )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
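# Worked example (illustrative numbers) of the rolling-hash update used in
# rabin_karp: with alphabet_size = 256 and the same modulus, hashing "abc" and
# then rolling one character to the right to "bcd" gives
#
#     h("abc") = ((ord("a") * 256 + ord("b")) * 256 + ord("c")) % modulus
#     h("bcd") = ((h("abc") - ord("a") * 256**2) * 256 + ord("d")) % modulus
#
# i.e. drop the leading character's contribution (scaled by modulus_power),
# shift by the base, and add the incoming character: an O(1) update per shift.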
| 609 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=3 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3 , __lowerCAmelCase=1_0 , __lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , __lowerCAmelCase=[1, 1, 2, 1] , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=3 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Tuple = parent
__magic_name__ :Optional[int] = batch_size
__magic_name__ :str = image_size
__magic_name__ :List[str] = num_channels
__magic_name__ :Union[str, Any] = embeddings_size
__magic_name__ :List[Any] = hidden_sizes
__magic_name__ :List[Any] = depths
__magic_name__ :Any = is_training
__magic_name__ :Tuple = use_labels
__magic_name__ :str = hidden_act
__magic_name__ :List[str] = num_labels
__magic_name__ :Optional[int] = scope
__magic_name__ :List[Any] = len(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :Dict = None
if self.use_labels:
__magic_name__ :Tuple = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ :List[str] = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFResNetModel(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = self.num_labels
__magic_name__ :Tuple = TFResNetForImageClassification(__lowerCAmelCase )
__magic_name__ :List[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a__ = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = TFResNetModelTester(self )
__magic_name__ :int = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Optional[int] = model_class(__lowerCAmelCase )
__magic_name__ :str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :str = [*signature.parameters.keys()]
__magic_name__ :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :Tuple = TFResNetModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__magic_name__ :List[str] = self.default_image_processor
__magic_name__ :Optional[Any] = prepare_img()
__magic_name__ :List[str] = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' )
# forward pass
__magic_name__ :Tuple = model(**__lowerCAmelCase )
# verify the logits
__magic_name__ :str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCAmelCase , atol=1E-4 ) )
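    # Sketch of the same inference flow outside the test harness, using the
    # public "microsoft/resnet-50" checkpoint (an assumption; any TFResNet
    # classification checkpoint works) and a PIL image as input:
    #
    #     from transformers import AutoImageProcessor, TFResNetForImageClassification
    #
    #     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    #     model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    #     inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL.Image
    #     logits = model(**inputs).logits  # shape (1, 1000) for ImageNet heads
    #     print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])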
| 701 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
__magic_name__ :str = 1_0_2_4
__magic_name__ :Tuple = 4_0_9_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :int = 1_6
__magic_name__ :Dict = [5, 1_1, 1_7, 2_3]
__magic_name__ :Optional[int] = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
__magic_name__ :Union[str, Any] = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
__magic_name__ :Tuple = True
__magic_name__ :int = 1_5_0
__magic_name__ :List[Any] = '''huggingface/label-files'''
__magic_name__ :Dict = '''ade20k-id2label.json'''
__magic_name__ :str = json.load(open(cached_download(hf_hub_url(snake_case, snake_case, repo_type='''dataset''' ) ), '''r''' ) )
        __magic_name__ :Optional[int] = {int(k ): v for k, v in idalabel.items()}
__magic_name__ :Any = idalabel
__magic_name__ :List[str] = {v: k for k, v in idalabel.items()}
__magic_name__ :Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__magic_name__ :int = name.replace('''pretrained.model''', '''dpt.encoder''' )
if "pretrained.model" in name:
__magic_name__ :str = name.replace('''pretrained.model''', '''dpt.embeddings''' )
if "patch_embed" in name:
__magic_name__ :Optional[Any] = name.replace('''patch_embed''', '''patch_embeddings''' )
if "pos_embed" in name:
__magic_name__ :List[Any] = name.replace('''pos_embed''', '''position_embeddings''' )
if "attn.proj" in name:
__magic_name__ :List[Any] = name.replace('''attn.proj''', '''attention.output.dense''' )
if "proj" in name and "project" not in name:
__magic_name__ :Dict = name.replace('''proj''', '''projection''' )
if "blocks" in name:
__magic_name__ :str = name.replace('''blocks''', '''layer''' )
if "mlp.fc1" in name:
__magic_name__ :Dict = name.replace('''mlp.fc1''', '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ :Tuple = name.replace('''mlp.fc2''', '''output.dense''' )
if "norm1" in name:
__magic_name__ :List[Any] = name.replace('''norm1''', '''layernorm_before''' )
if "norm2" in name:
__magic_name__ :Union[str, Any] = name.replace('''norm2''', '''layernorm_after''' )
if "scratch.output_conv" in name:
__magic_name__ :Any = name.replace('''scratch.output_conv''', '''head''' )
if "scratch" in name:
__magic_name__ :str = name.replace('''scratch''', '''neck''' )
if "layer1_rn" in name:
__magic_name__ :Union[str, Any] = name.replace('''layer1_rn''', '''convs.0''' )
if "layer2_rn" in name:
__magic_name__ :int = name.replace('''layer2_rn''', '''convs.1''' )
if "layer3_rn" in name:
__magic_name__ :str = name.replace('''layer3_rn''', '''convs.2''' )
if "layer4_rn" in name:
__magic_name__ :Optional[Any] = name.replace('''layer4_rn''', '''convs.3''' )
if "refinenet" in name:
__magic_name__ :int = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__magic_name__ :List[Any] = name.replace(f'''refinenet{layer_idx}''', f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
__magic_name__ :str = name.replace('''out_conv''', '''projection''' )
if "resConfUnit1" in name:
__magic_name__ :Dict = name.replace('''resConfUnit1''', '''residual_layer1''' )
if "resConfUnit2" in name:
__magic_name__ :int = name.replace('''resConfUnit2''', '''residual_layer2''' )
if "conv1" in name:
__magic_name__ :str = name.replace('''conv1''', '''convolution1''' )
if "conv2" in name:
__magic_name__ :Union[str, Any] = name.replace('''conv2''', '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__magic_name__ :Optional[int] = name.replace('''pretrained.act_postprocess1.0.project.0''', '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
__magic_name__ :int = name.replace('''pretrained.act_postprocess2.0.project.0''', '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
__magic_name__ :Optional[int] = name.replace('''pretrained.act_postprocess3.0.project.0''', '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
__magic_name__ :str = name.replace('''pretrained.act_postprocess4.0.project.0''', '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__magic_name__ :Any = name.replace('''pretrained.act_postprocess1.3''', '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
__magic_name__ :Any = name.replace('''pretrained.act_postprocess1.4''', '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
__magic_name__ :Dict = name.replace('''pretrained.act_postprocess2.3''', '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
__magic_name__ :Optional[Any] = name.replace('''pretrained.act_postprocess2.4''', '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
__magic_name__ :List[str] = name.replace('''pretrained.act_postprocess3.3''', '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
__magic_name__ :Optional[Any] = name.replace('''pretrained.act_postprocess4.3''', '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
__magic_name__ :Optional[Any] = name.replace('''pretrained.act_postprocess4.4''', '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
__magic_name__ :Any = name.replace('''pretrained''', '''dpt''' )
if "bn" in name:
__magic_name__ :int = name.replace('''bn''', '''batch_norm''' )
if "head" in name:
__magic_name__ :str = name.replace('''head''', '''head.head''' )
if "encoder.norm" in name:
__magic_name__ :Union[str, Any] = name.replace('''encoder.norm''', '''layernorm''' )
if "auxlayer" in name:
__magic_name__ :Dict = name.replace('''auxlayer''', '''auxiliary_head.head''' )
return name
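# Illustration of the rename rules above on two hypothetical checkpoint keys
# (the refinenet index is flipped because the fusion stage runs coarse-to-fine):
#
#     "scratch.refinenet4.out_conv.weight"
#         -> "neck.fusion_stage.layers.0.projection.weight"
#     "scratch.refinenet1.resConfUnit2.conv1.weight"
#         -> "neck.fusion_stage.layers.3.residual_layer2.convolution1.weight"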
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ :int = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
__magic_name__ :Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Optional[Any] = in_proj_weight[: config.hidden_size, :]
__magic_name__ :str = in_proj_bias[: config.hidden_size]
__magic_name__ :List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ :Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ :int = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ :Union[str, Any] = in_proj_bias[-config.hidden_size :]
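# Shape sketch for the split above, with hidden size H: timm stores a fused
# projection in_proj_weight of shape (3H, H) and in_proj_bias of shape (3H,).
# Rows [0:H] become the query projection, [H:2H] the key, and [2H:3H] the value,
# matching the slicing performed above in read-in order (query, key, value).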
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ :Union[str, Any] = Image.open(requests.get(snake_case, stream=snake_case ).raw )
return im
@torch.no_grad()
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Any = get_dpt_config(snake_case )
# load original state_dict from URL
__magic_name__ :List[str] = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(snake_case )
# rename keys
for key in state_dict.copy().keys():
__magic_name__ :int = state_dict.pop(snake_case )
__magic_name__ :Tuple = val
# read in qkv matrices
read_in_q_k_v(snake_case, snake_case )
# load HuggingFace model
__magic_name__ :int = DPTForSemanticSegmentation(snake_case ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(snake_case )
model.load_state_dict(snake_case )
model.eval()
# Check outputs on an image
__magic_name__ :Union[str, Any] = 4_8_0 if '''ade''' in checkpoint_url else 3_8_4
__magic_name__ :int = DPTImageProcessor(size=snake_case )
__magic_name__ :Dict = prepare_img()
__magic_name__ :List[str] = image_processor(snake_case, return_tensors='''pt''' )
# forward pass
__magic_name__ :Dict = model(**snake_case ).logits if '''ade''' in checkpoint_url else model(**snake_case ).predicted_depth
# Assert logits
__magic_name__ :int = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
__magic_name__ :Any = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(snake_case )
assert (
torch.allclose(outputs[0, 0, :3, :3], snake_case, atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3], snake_case )
)
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(snake_case, snake_case ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=snake_case, )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case, snake_case ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=snake_case, )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
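# Example invocation (the script name and output path are placeholders; the
# checkpoint URL is the default declared above):
#
#     python convert_dpt_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large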
| 180 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def snake_case_ (self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = FlaxAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ (self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(lowerCAmelCase__ ):
_UpperCAmelCase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ (self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : int = FlaxBertModel.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase__ ):
return model(**lowerCAmelCase__ )
eval(**lowerCAmelCase__ ).block_until_ready()
@slow
def snake_case_ (self ):
for model_name in ["roberta-base", "roberta-large"]:
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Any = FlaxRobertaModel.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase__ ):
return model(**lowerCAmelCase__ )
eval(**lowerCAmelCase__ ).block_until_ready()
def snake_case_ (self ):
with self.assertRaisesRegex(
lowerCAmelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase : str = FlaxAutoModel.from_pretrained("""bert-base""" )
def snake_case_ (self ):
with self.assertRaisesRegex(
lowerCAmelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_UpperCAmelCase : str = FlaxAutoModel.from_pretrained(lowerCAmelCase__ , revision="""aaaaaa""" )
def snake_case_ (self ):
with self.assertRaisesRegex(
lowerCAmelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
_UpperCAmelCase : Dict = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def snake_case_ (self ):
with self.assertRaisesRegex(lowerCAmelCase__ , """Use `from_pt=True` to load this model""" ):
_UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
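    # Note on the jitted tests above: JAX dispatches computation asynchronously,
    # so calling .block_until_ready() forces the forward pass to actually run to
    # completion (and surface any tracing or compilation error) before the test
    # method returns.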
| 414 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__a )
class __lowerCAmelCase ( __a ):
def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def snake_case_ (self , lowerCAmelCase__=None ):
_UpperCAmelCase : Any = {}
if top_k is not None:
_UpperCAmelCase : Tuple = top_k
return {}, {}, postprocess_params
def __call__(self , lowerCAmelCase__ , **lowerCAmelCase__ ):
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = load_image(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework )
return model_inputs
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Any = self.model(**lowerCAmelCase__ )
return model_outputs
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase : Dict = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase : Tuple = model_outputs.logits.softmax(-1 )[0]
_UpperCAmelCase , _UpperCAmelCase : Dict = probs.topk(lowerCAmelCase__ )
elif self.framework == "tf":
_UpperCAmelCase : Dict = stable_softmax(model_outputs.logits , axis=-1 )[0]
_UpperCAmelCase : Any = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
_UpperCAmelCase : str = scores.tolist()
_UpperCAmelCase : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
| 414 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=128 , _a=32 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=4 , _a=None , ) -> int:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> str:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def __a ( self ) -> int:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
lowerCAmelCase_ = NezhaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
lowerCAmelCase_ = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
lowerCAmelCase_ = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
lowerCAmelCase_ = True
lowerCAmelCase_ = NezhaModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
lowerCAmelCase_ = NezhaForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
lowerCAmelCase_ = NezhaForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
lowerCAmelCase_ = NezhaForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
lowerCAmelCase_ = NezhaForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = NezhaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = NezhaForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = NezhaForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
def __a ( self , _a , _a , _a=False ) -> Union[str, Any]:
lowerCAmelCase_ = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
lowerCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = NezhaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def __a ( self ) -> int:
self.config_tester.run_common_tests()
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def __a ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def __a ( self ) -> str:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def __a ( self ) -> Dict:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = NezhaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(config=_lowerCamelCase )
lowerCAmelCase_ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = torch.jit.trace(
_lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , "bert.pt" ) )
lowerCAmelCase_ = torch.jit.load(os.path.join(_lowerCamelCase , "bert.pt" ) , map_location=_lowerCamelCase )
loaded(inputs_dict["input_ids"].to(_lowerCamelCase ) , inputs_dict["attention_mask"].to(_lowerCamelCase ) )
@require_torch
class __magic_name__ (unittest.TestCase ):
@slow
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
lowerCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
lowerCAmelCase_ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> Tuple:
lowerCAmelCase_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
lowerCAmelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
lowerCAmelCase_ = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1E-4 ) )
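    # Usage sketch mirroring the integration tests above (the checkpoint is taken
    # from the tests themselves):
    #
    #     import torch
    #     from transformers import NezhaModel
    #
    #     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
    #     out = model(torch.tensor([[0, 1, 2, 3, 4, 5]]), attention_mask=torch.tensor([[1] * 6]))
    #     out.last_hidden_state.shape  # torch.Size([1, 6, 768])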
| 701 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ''''''
lowerCamelCase__ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase__ = None # compression type in fsspec. ex: "gzip"
    lowerCamelCase__ = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , _a = "" , _a = None , _a = None , **_a ) -> Any:
super().__init__(self , **_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase_ = fsspec.open(
_a , mode="rb" , protocol=_a , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase_ = os.path.basename(self.file.path.split("::" )[0] )
lowerCAmelCase_ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCAmelCase_ = None
@classmethod
def __a ( cls , _a ) -> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_a ).lstrip("/" )
def __a ( self ) -> Union[str, Any]:
if self.dir_cache is None:
lowerCAmelCase_ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
lowerCAmelCase_ = {f["name"]: f}
def __a ( self , _a ) -> Dict:
return self.file.open().read()
def __a ( self , _a , _a = "rb" , _a=None , _a=True , _a=None , **_a , ) -> Optional[Any]:
lowerCAmelCase_ = self._strip_protocol(_a )
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''.bz2'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''.gz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''.lz4'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''.xz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''.zst'''
def __init__( self , _a , _a = "rb" , _a = None , _a = None , _a = DEFAULT_BLOCK_SIZE , **_a , ) -> Tuple:
super().__init__(
fo=_a , mode=_a , target_protocol=_a , target_options=_a , block_size=_a , **_a , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase_ = self.file.__enter__
class __magic_name__ :
def __init__( self , _a ) -> List[str]:
lowerCAmelCase_ = file_
def __enter__( self ) -> int:
self._file.__enter__()
return self
def __exit__( self , *_a , **_a ) -> Dict:
self._file.__exit__(*_a , **_a )
def __iter__( self ) -> List[Any]:
return iter(self._file )
def __a ( self ) -> List[Any]:
return next(self._file )
def __getattr__( self , _a ) -> Tuple:
return getattr(self._file , _a )
def fixed_enter(*_a , **_a ):
return WrappedFile(_enter(*_a , **_a ) )
lowerCAmelCase_ = fixed_enter
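    # Usage sketch (assumptions: these filesystems are registered with fsspec, as
    # the datasets library does on import, and the chained URL below is a
    # placeholder path on disk):
    #
    #     import fsspec
    #
    #     with fsspec.open("gzip://file.txt::./archive.txt.gz", mode="rb") as f:
    #         data = f.read()  # decompressed bytes of file.txt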
| 226 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : str ={"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int =[
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
A_ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
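# Note on the pattern above: _LazyModule defers importing the heavy modeling code
# until an attribute is first accessed, e.g.
#
#     from transformers.models.ibert import IBertConfig  # cheap, config only
#     from transformers.models.ibert import IBertModel   # first access triggers the torch-dependent import
#
# while the TYPE_CHECKING branch keeps static type checkers and IDEs accurate.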
| 274 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=3_0 , __snake_case=4_0_0 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=1 / 2_5_5 , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , __snake_case=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = do_rescale
snake_case = rescale_factor
snake_case = do_normalize
snake_case = image_mean
snake_case = image_std
snake_case = do_pad
def a_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a_ ( self , __snake_case , __snake_case=False ):
if not batched:
snake_case = image_inputs[0]
if isinstance(__snake_case , Image.Image ):
snake_case , snake_case = image.size
else:
snake_case , snake_case = image.shape[1], image.shape[2]
if w < h:
snake_case = int(self.size['''shortest_edge'''] * h / w )
snake_case = self.size['''shortest_edge''']
elif w > h:
snake_case = self.size['''shortest_edge''']
snake_case = int(self.size['''shortest_edge'''] * w / h )
else:
snake_case = self.size['''shortest_edge''']
snake_case = self.size['''shortest_edge''']
else:
snake_case = []
for image in image_inputs:
snake_case , snake_case = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case = max(__snake_case , key=lambda __snake_case : item[0] )[0]
snake_case = max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
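    # Worked example of the shortest-edge resize above: for an image with h=30 and
    # w=40 and size {"shortest_edge": 18}, the w > h branch fires, so
    #     expected_height = 18
    #     expected_width  = int(18 * 40 / 30) = 24
    # i.e. the short side is pinned to the target and the aspect ratio is kept.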
@require_torch
@require_vision
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = DetrImageProcessor if is_vision_available() else None
def a_ ( self ):
snake_case = DetrImageProcessingTester(self )
@property
def a_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self ):
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_rescale''' ) )
self.assertTrue(hasattr(__snake_case , '''rescale_factor''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
def a_ ( self ):
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __snake_case )
snake_case = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , __snake_case )
def a_ ( self ):
pass
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
snake_case , snake_case = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self ):
        # prepare image and target
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
        encoding = image_processing(images=image , annotations=target , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self ):
        # prepare image, target and masks_path
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 8_2_2_8_7_3
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
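# A minimal usage sketch of the processor exercised above (an added example,
# not part of the test suite; it assumes network access to download the public
# checkpoint, and "cats.png" is a placeholder path):
#
#   from transformers import DetrImageProcessor
#   from PIL import Image
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   pixel_values = processor(images=Image.open("cats.png"), return_tensors="pt").pixel_values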
| 550 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 2_5_0
def main() -> None:
    """
    Get image paths and annotations from the input dirs, build NUMBER_IMAGES
    mosaic images and save them with updated annotations in OUTPUT_DIR.
    """
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2 )
        file_name = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(F"{file_root}.jpg" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(F"{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ) -> tuple[list, list]:
    """Traverse the label directory and collect image paths with their box annotations."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list: list , all_annos: list , idxs: list[int] , output_size: tuple[int, int] , scale_range: tuple[float, float] , filter_scale: float = 0.0 , ) -> tuple[list, list, str]:
    """Stitch four images into one mosaic and rescale their annotations accordingly."""
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int ) -> str:
    """Generate a random lowercase alphanumeric string of the requested length."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('DONE ✅')
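# For reference, the annotation files consumed by get_dataset and produced by
# main use the YOLO text format, one object per line with coordinates
# normalized to [0, 1] (the values below are illustrative):
#
#   0 0.5123 0.4410 0.3000 0.2500   # class x_center y_center width height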
| 68 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[int] , iterations: int , ) -> list[float]:
    """Solve the strictly diagonally dominant system Ax = b with the Jacobi iteration method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ) -> bool:
    """Raise if the coefficient part of the augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
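# A small worked example (an added illustration): solve the strictly
# diagonally dominant system 4x + y = 1, x + 3y = 2.
#
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[1.0], [2.0]])
#   print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))
#   # converges towards x ≈ 0.0909 (1/11) and y ≈ 0.6364 (7/11)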
| 68 | 1 |
from __future__ import annotations
lowerCamelCase_ : int = """Muhammad Umer Farooq"""
lowerCamelCase_ : Optional[int] = """MIT"""
lowerCamelCase_ : List[str] = """1.0.0"""
lowerCamelCase_ : Tuple = """Muhammad Umer Farooq"""
lowerCamelCase_ : List[Any] = """[email protected]"""
lowerCamelCase_ : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__( self , domain: str ) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor a bare fragment, record it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str ) -> str:
    """Return the registrable domain, e.g. 'github.com' for 'https://api.github.com/x'."""
    return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )


def get_sub_domain_name(url: str ) -> str:
    """Return the network location, e.g. 'api.github.com'."""
    return parse.urlparse(url ).netloc


def emails_from_url(url: str = "https://github.com" ) -> list[str]:
    domain = get_domain_name(url )

    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = emails_from_url("""https://github.com""")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
 | 559 | def cramers_rule_2x2(equation1: list , equation2: list ):
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
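# Worked example (an added illustration): solve 11x + 2y = 30 and x = 4,
# where each equation is given as [a, b, c] for ax + by = c.
#
#   >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4])
#   (4.0, -7.0)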
| 559 | 1 |
def mf_knapsack(i , wt , val , j ):
    """Memoized 0/1 knapsack: best value of the first i items under capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1 , wt , val , j )
        else:
            best = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = best
    return f[i][j]
def knapsack(w , wt , val , n ):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w , wt , val ):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                "All weights must be integers but got weight of "
                f'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker ):
    def check_output( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
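# Usage sketch for the custom flag registered above (an added example): inside
# a doctest, append the flag to skip output comparison for that line.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123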
| 431 | 0 |
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    """Return True if the given integer is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int ) -> list[int]:
    """Return n together with every left and right truncation of its digits."""
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate(n: int ) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, both 3-digit ends must be prime."""
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes(count: int = 11 ) -> list[int]:
    """Return the first `count` primes that stay prime under truncation from both sides."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Project Euler 37: sum of the only eleven two-sided truncatable primes."""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
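# Illustrative check (an added example): the first four two-sided
# truncatable primes.
#
#   >>> compute_truncated_primes(4)
#   [23, 37, 53, 73]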
| 472 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        """Tokenize a string."""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        """Converts a sequence of sub-word tokens into a single string."""
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer' , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
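# A minimal usage sketch (an added example; it assumes the `sentencepiece`
# dependency and network access to fetch the public checkpoint):
#
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   print(tokenizer.tokenize("Hello, world!"))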
| 472 | 1 |
def solution(pence: int = 200 ) -> int:
    """Count the ways to make `pence` from standard British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
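# Worked example (an added illustration): there are four ways to make 5 pence
# from {1, 2, 5}: 5; 2+2+1; 2+1+1+1; 1+1+1+1+1.
#
#   >>> solution(5)
#   4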
| 708 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line ):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the
    TYPE_CHECKING objects defined.
    """
    with open(init_file , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall("""\[([^\]]+)\]""" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(""", """ )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(""", """ )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = """base imports""" if key == """none""" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , """__init__.py""" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("""\n\n""".join(failures ) )
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , """.""" )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def check_submodules():
    """Check all submodules appear in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        """transformers""" , os.path.join(PATH_TO_TRANSFORMERS , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = """\n""".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
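# For reference, an illustrative sketch (not executed here) of the backend
# block shape in an __init__.py that parse_init understands:
#
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass
#   else:
#       _import_structure["models.bert"].extend(["BertModel"])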
| 76 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        self.size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = DPTImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
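# To run just this module with pytest (the path below is the conventional
# location in the transformers repo and is an assumption here):
#
#   pytest tests/models/dpt/test_image_processing_dpt.py -k "call"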
| 324 |
import argparse
import json
import subprocess
def get_runner_status(target_runners , token ):
    """Query the GitHub API and report the offline runners among `target_runners`."""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("""utf-8""" )
    status = json.loads(o )

    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" , """w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str(values ):
        """Parse a comma-separated CLI value into a list."""
        return values.split(""",""" )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
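# Example invocation (the script name and token value below are placeholders,
# not taken from the original):
#
#   python check_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>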
| 324 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
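# Example invocation once the package is installed (the `env` subcommand is
# registered above via EnvironmentCommand):
#
#   transformers-cli env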
| 579 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}


class LukeConfig(PretrainedConfig ):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = "luke"

    def __init__( self , vocab_size=50267 , entity_vocab_size=500000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
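# Minimal usage sketch (an added example): instantiate the default
# configuration and inspect one of the LUKE-specific fields.
#
#   config = LukeConfig()
#   print(config.entity_emb_size)  # 256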
| 579 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset ):
    """Wrapper around the token sequences used for distillation: stores the
    sequences and their lengths, and cleans the corpus on construction."""

    def __init__( self , params , data ):
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__( self , index ):
        return (self.token_ids[index], self.lengths[index])

    def __len__( self ):
        return len(self.lengths )
    def check( self ):
        """Some sanity checks."""
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'''Splitting {sum(indices )} too long sequences.''' )

        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id ,sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id ,sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ):
        """Too short sequences are simply removed."""
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def remove_unknown_sequences( self ):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
    def print_statistics( self ):
        """Print some statistics on the corpus. Only the master process."""
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
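# A minimal usage sketch (an added example; it assumes `params` carries the
# fields read above and `sequences` is a list of numpy token-id arrays):
#
#   dataset = LmSeqsDataset(params=params, data=sequences)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, collate_fn=dataset.batch_sequences
#   )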
| 42 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: the current path already overshoots, or even taking every
    # remaining number cannot reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
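# Worked example: with nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the search
# finds exactly two index-increasing subsets that sum to 9, so the script prints:
# [3, 4, 2] [4, 5]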
| 454 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """
    Helper function for reproducible behavior: sets the seed in `random`, `numpy` and `torch`.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
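    # Rough illustration of the warmup schedule (numbers are editorial, not from
    # the original file): with inv_gamma=1.0 and power=2/3 the decay
    # 1 - (1 + step) ** (-2 / 3) passes 0.9 near step 31 and 0.99 near step 1000,
    # before being clamped into [min_decay, decay].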
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
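    # s_param.sub_(one_minus_decay * (s_param - param)) is the in-place form of
    # the usual EMA update: shadow = decay * shadow + (1 - decay) * param.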
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Copy current averaged parameters into the given collection of parameters.
        """
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """
        Move the internal buffers of the ExponentialMovingAverage to `device`.
        """
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """
        Returns the state of the ExponentialMovingAverage as a dict.
        """
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Save the current parameters for restoring later.
        """
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Restore the parameters stored with the `store` method.
        """
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """
        Loads the ExponentialMovingAverage state.
        """
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 524 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with an extra CLS head on top for predicting the
    answer category; its weights can still be loaded with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
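# The QA loss averages three per-head cross-entropies (answer start, answer end,
# answer category) with equal weight; each term is already a batch mean via the
# jnp.mean reduction above.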
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
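# Under jax.pmap every device computes its own loss and gradients;
# jax.lax.pmean(..., axis_name="batch") averages them across devices so each
# replica applies an identical update.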
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Decay everything except biases and LayerNorm scales, keyed on the
        # flattened parameter path.
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 524 | 1 |
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c for a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
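# For the default n = 1000 this is Project Euler problem 9: the only such
# triplet is (200, 375, 425), so solution() returns 31875000.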
if __name__ == "__main__":
print(f"""{solution() = }""") | 360 |
"""Numerical integration with the extended trapezoidal rule."""


def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
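# Sanity check: for f(x) = x**2 on [0, 1] the exact integral is 1/3; running
# main() should print a 10-step trapezoidal estimate of about 0.335 (slightly
# high, since x**2 is convex).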
| 447 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self, claim_vector, allocated_resources_table, maximum_claim_table
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        """Per resource, sum what all processes currently hold."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        """Per process, what it may still request (maximum claim - allocated)."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need vector back to its process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()

        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
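    # Minimal usage sketch ("describe" is an arbitrary flag name; main() only
    # checks that some keyword is truthy before printing the tables):
    # BankersAlgorithm(
    #     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    # ).main(describe=True)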
| 345 | 0 |
"""Gaussian (normal) distribution density function."""

from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest
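    # Spot check: gaussian(0) with the defaults equals 1 / sqrt(2 * pi) ≈ 0.3989.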
doctest.testmod() | 494 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase : Optional[int] = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the fixture dict defined above (minified name kept as-is)
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 643 | 0 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y) = gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
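# For the default n = 20 this returns 232792560, the smallest number evenly
# divisible by every integer from 1 to 20 (Project Euler problem 5).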
if __name__ == "__main__":
print(F'{solution() = }')
| 702 |
""" Audio Spectrogram Transformer (AST) model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
"""Calculate the n-th Proth number."""

import math


def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
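# The resulting sequence is OEIS A080075: 3, 5, 9, 13, 17, 25, 33, 41, 49, 57, ...
# i.e. numbers of the form k * 2**n + 1 with k odd and k < 2**n.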
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 0 |
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 691 | 1 |
"""Tests for the `utils/check_dummies.py` script."""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
__magic_name__ : str = """
{0} = None
"""
__magic_name__ : Tuple = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
__magic_name__ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend("    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n"
            "\n"
            "\n"
            "CONSTANT = None\n"
            "\n"
            "\n"
            "def function(*args, **kwargs):\n"
            '    requires_backends(function, ["torch"])\n'
            "\n"
            "\n"
            "class FakeClass(metaclass=DummyObject):\n"
            '    _backends = ["torch"]\n'
            "\n"
            "    def __init__(self, *args, **kwargs):\n"
            '        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 672 |
"""Image processor: resize -> center-crop -> rescale -> normalize pipeline."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
_snake_case = size if size is not None else {"shortest_edge": 256}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(lowerCamelCase )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCamelCase )
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_snake_case = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
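
# Illustrative standalone sketch (added for exposition; it mirrors the
# shortest-edge resize rule the processor above applies by default — the
# 256 default comes from __init__, everything else here is an assumption):
def _shortest_edge_size(height, width, shortest_edge=256):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert _shortest_edge_size(480, 640) == (256, 341)  # landscape input
assert _shortest_edge_size(640, 480) == (341, 256)  # portrait input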
| 672 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a__ : List[str] = logging.get_logger(__name__)
def shape_list(tensor):
    # Returns the static shape where it is known and falls back to dynamic
    # tf ops for dimensions that are None at graph-construction time.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def _lowerCAmelCase ( A__ , A__ = None , A__ = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=A__ , name=A__ )
def _lowerCAmelCase ( A__ , A__ , A__ , A__=1E-5 , A__=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(A__ , A__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
lowercase__, lowercase__ = tf.nn.moments(A__ , axes=[axis] , keepdims=A__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase__ = [1] * inputs.shape.rank
lowercase__ = shape_list(A__ )[axis]
lowercase__ = tf.reshape(A__ , A__ )
lowercase__ = tf.reshape(A__ , A__ )
# Compute layer normalization using the batch_normalization
# function.
lowercase__ = tf.nn.batch_normalization(
A__ , A__ , A__ , offset=A__ , scale=A__ , variance_epsilon=A__ , )
return outputs
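
# Illustrative self-check (added for exposition, not part of the upstream
# module): verifies the moments-based layer-norm math above with plain NumPy.
def _layernorm_selfcheck():
    x = np.random.randn(2, 4).astype(np.float32)
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    normed = (x - mean) / np.sqrt(var + 1e-5)  # weight=1, bias=0 case
    # a layer-normalized row has (approximately) zero mean
    assert np.allclose(normed.mean(axis=-1), 0.0, atol=1e-5)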
def _lowerCAmelCase ( A__ , A__=0 , A__=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase__ = tf.shape(A__ )
lowercase__ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase__ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(A__ , A__ )
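
# Shape sketch for the flatten helper above (exposition only): collapsing
# dims 1..2 of a (2, 3, 4, 5) tensor yields (2, 12, 5), matching
# torch.flatten(input, start_dim=1, end_dim=2).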
def _lowerCAmelCase ( A__ ):
if not isinstance(A__ , tf.Tensor ):
lowercase__ = tf.convert_to_tensor(A__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase__ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase__ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
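
# Numeric sketch of the additive-mask construction above (exposition only):
# a 1 in the padding mask becomes 0.0 (attend) and a 0 becomes dtype.min
# (effectively minus infinity once added to attention scores).
def _additive_mask_sketch():
    mask = tf.constant([[1.0, 1.0, 0.0]])  # last position is padding
    ext = mask[:, None, None, :]           # broadcast to (batch, 1, 1, seq)
    return (tf.cast(1, ext.dtype) - ext) * ext.dtype.min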
def _lowerCAmelCase ( A__ , A__ , A__ = "input_ids" ):
tf.debugging.assert_less(
A__ , tf.cast(A__ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(A__ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase__ = [x for x in data if len(A__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
lowercase__ = np.asarray(A__ )
lowercase__ = 1
lowercase__ = np.array_split(A__ , A__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase__ = np.array_split(A__ , A__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(A__ ):
lowercase__ = chunk_data
else:
lowercase__ = data
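
# Chunking sketch (exposition only): oversized attribute arrays are stored as
# "<name>0", "<name>1", ... pieces and reassembled by the loader below.
def _chunking_sketch():
    data = np.asarray([b"weight_%d" % i for i in range(6)])
    chunks = np.array_split(data, 3)
    assert [len(c) for c in chunks] == [2, 2, 2]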
def _lowerCAmelCase ( A__ , A__ ):
if name in group.attrs:
lowercase__ = [n.decode('utf8' ) if hasattr(A__ , 'decode' ) else n for n in group.attrs[name]]
else:
lowercase__ = []
lowercase__ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(A__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase ( A__ ):
def _expand_single_ad_tensor(A__ ):
if isinstance(A__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(A__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , A__ )
| 642 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # once the lexicon is full (a power of two), grow every code by one bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that the compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed file, strip its prefix, and write the decompressed output."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
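
# Bit-formatting sketch (exposition only): read_file_binary renders every byte
# as an eight-character bit string, e.g. f"{65:08b}" == "01000001" (ASCII 'A').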
| 642 | 1 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param( snake_case : List[str] )-> Optional[Any]:
    _lowerCamelCase = checkpoints.load_t5x_checkpoint(snake_case )
_lowerCamelCase = flatten_dict(snake_case )
return flax_params
def rename_and_convert_flax_params( snake_case : str )-> int:
_lowerCamelCase = {}
_lowerCamelCase = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
_lowerCamelCase = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_lowerCamelCase = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_lowerCamelCase = new_key.replace(snake_case , snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_lowerCamelCase = new_key.replace(snake_case , snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_lowerCamelCase = re.sub(r'layers_(\d+)' , r'layer.\1' , snake_case )
_lowerCamelCase = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_lowerCamelCase = re.sub(r'layers_(\d+)' , r'layer.\1' , snake_case )
_lowerCamelCase = flax_dict[key]
_lowerCamelCase = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_lowerCamelCase = torch.from_numpy(converted_dict[key].T )
else:
_lowerCamelCase = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( snake_case : List[Any] , snake_case : Optional[int] , snake_case : str=False , snake_case : int=False )-> Dict:
_lowerCamelCase = get_flax_param(snake_case )
if not use_large:
        _lowerCamelCase = Pix2StructVisionConfig()
        _lowerCamelCase = Pix2StructTextConfig()
    else:
        _lowerCamelCase = Pix2StructVisionConfig(
            hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
        _lowerCamelCase = Pix2StructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
    _lowerCamelCase = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=snake_case )
    _lowerCamelCase = Pix2StructForConditionalGeneration(snake_case )
_lowerCamelCase = rename_and_convert_flax_params(snake_case )
model.load_state_dict(snake_case )
_lowerCamelCase = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    _lowerCamelCase = Pix2StructImageProcessor()
    _lowerCamelCase = Pix2StructProcessor(image_processor=snake_case , tokenizer=snake_case )
if use_large:
_lowerCamelCase = 4_096
_lowerCamelCase = True
# mkdir if needed
os.makedirs(snake_case , exist_ok=snake_case )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
print('Model saved in {}'.format(snake_case ) )
if __name__ == "__main__":
A_ : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
A_ : List[str] =parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
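
# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_pix2struct_original_pytorch_checkpoint_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base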
| 650 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("""Please provide a search term.""")
raise
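
# Example invocation (illustrative; the script name is an assumption):
#   python download_images_from_google_query.py "blue sky"
# downloads up to 5 images (the default max_images) into ./query_blue_sky/.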
| 650 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = 42
class __UpperCAmelCase ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 32 , __A = 64 , __A = 20 , __A = 768 , __A=77 , __A=4 , __A = 0.0 , __A = "silu" , __A = None , __A = None , __A = "linear" , __A = "prd" , __A = None , __A = None , __A = None , ):
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 )
__a = TimestepEmbedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_dim=_SCREAMING_SNAKE_CASE , act_fn=_SCREAMING_SNAKE_CASE )
__a = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
__a = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _SCREAMING_SNAKE_CASE ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , _SCREAMING_SNAKE_CASE ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
__a = nn.ModuleList(
[
BasicTransformerBlock(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , activation_fn="""gelu""" , attention_bias=_SCREAMING_SNAKE_CASE , )
for d in range(_SCREAMING_SNAKE_CASE )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
elif norm_in_type is None:
__a = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
__a = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
__a = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _SCREAMING_SNAKE_CASE , persistent=_SCREAMING_SNAKE_CASE )
__a = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
__a = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self ):
__a = {}
def fn_recursive_add_processors(__A , __A , __A ):
if hasattr(_SCREAMING_SNAKE_CASE , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return processors
def snake_case_ ( self , __A ):
__a = len(self.attn_processors.keys() )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__A , __A , __A ):
if hasattr(_SCREAMING_SNAKE_CASE , """set_processor""" ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
module.set_processor(_SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case_ ( self ):
self.set_attn_processor(AttnProcessor() )
def snake_case_ ( self , __A , __A , __A , __A = None , __A = None , __A = True , ):
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(_SCREAMING_SNAKE_CASE ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(_SCREAMING_SNAKE_CASE , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(_SCREAMING_SNAKE_CASE )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(_SCREAMING_SNAKE_CASE )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(_SCREAMING_SNAKE_CASE )
__a = self.embedding_proj(_SCREAMING_SNAKE_CASE )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(_SCREAMING_SNAKE_CASE )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(_SCREAMING_SNAKE_CASE )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(_SCREAMING_SNAKE_CASE , -1 , -1 )
additional_embeds.append(_SCREAMING_SNAKE_CASE )
__a = torch.cat(
_SCREAMING_SNAKE_CASE , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
_SCREAMING_SNAKE_CASE , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(_SCREAMING_SNAKE_CASE , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(_SCREAMING_SNAKE_CASE )
for block in self.transformer_blocks:
__a = block(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__a = self.norm_out(_SCREAMING_SNAKE_CASE )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_SCREAMING_SNAKE_CASE )
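
    # Usage sketch (exposition only; the constructor defaults above give a
    # 77-token prior operating on 768-dim embeddings — every name below is an
    # assumption taken from the upstream library, not from this file):
    #   prior = PriorTransformer()
    #   out = prior(
    #       hidden_states=torch.randn(1, 768),
    #       timestep=torch.tensor([10]),
    #       proj_embedding=torch.randn(1, 768),
    #       encoder_hidden_states=torch.randn(1, 77, 768),
    #   ).predicted_image_embedding          # -> shape (1, 768)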
def snake_case_ ( self , __A ):
__a = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 713 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def a (lowerCAmelCase__ ):
    __a = boto3.client("""iam""" )
__a = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCAmelCase__ , AssumeRolePolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) )
__a = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCAmelCase__ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def a (lowerCAmelCase__ ):
    __a = boto3.client("""iam""" )
return iam_client.get_role(RoleName=lowerCAmelCase__ )["Role"]["Arn"]
def a ():
__a = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , lowerCAmelCase__ , )
__a = None
if credentials_configuration == 0:
__a = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__a = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__a = _ask_field("""AWS Access Key ID: """ )
__a = aws_access_key_id
__a = _ask_field("""AWS Secret Access Key: """ )
__a = aws_secret_access_key
__a = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__a = aws_region
__a = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , lowerCAmelCase__ , )
if role_management == 0:
__a = _ask_field("""Enter your IAM role name: """ )
else:
__a = """accelerate_sagemaker_execution_role"""
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowerCAmelCase__ )
__a = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_custom_docker_image:
__a = _ask_field("""Enter your Docker image: """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() )
__a = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_sagemaker_inputs_enabled:
__a = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_sagemaker_metrics_enabled:
__a = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__a = {}
__a = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__a = """dynamo_"""
__a = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__a = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__a = _ask_options(
"""Which mode do you want to use?""" , lowerCAmelCase__ , lambda lowerCAmelCase__ : TORCH_DYNAMO_MODES[int(lowerCAmelCase__ )] , default="""default""" , )
__a = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__a = _ask_options(
lowerCAmelCase__ , lowerCAmelCase__ , lambda lowerCAmelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCAmelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__a = _ask_field(lowerCAmelCase__ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , default="""ml.p3.2xlarge""" )
__a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__a = _ask_field(
"""How many machines do you want use? [1]: """ , lowerCAmelCase__ , default=1 , )
__a = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
        image_uri=lowerCAmelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCAmelCase__ , use_cpu=lowerCAmelCase__ , dynamo_config=lowerCAmelCase__ , ec2_instance_type=lowerCAmelCase__ , profile=lowerCAmelCase__ , region=lowerCAmelCase__ , iam_role_name=lowerCAmelCase__ , mixed_precision=lowerCAmelCase__ , num_machines=lowerCAmelCase__ , sagemaker_inputs_file=lowerCAmelCase__ , sagemaker_metrics_file=lowerCAmelCase__ , )
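
# Context note (exposition only, not from this file): this prompt flow is the
# SageMaker branch of `accelerate config`; the SageMakerConfig returned above
# is what gets serialized to the default accelerate config file.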
| 209 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Dict=30 , __SCREAMING_SNAKE_CASE : List[Any]=4_00 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=1 / 2_55 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Tuple=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_pad
def _UpperCAmelCase ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=False ):
if not batched:
__a = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(UpperCAmelCase_ , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__a = max(UpperCAmelCase_ , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = DetrImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : Tuple ):
__a = DetrImageProcessingTester(self )
@property
def _UpperCAmelCase ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : int ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_rescale" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , "do_pad" ) )
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
def _UpperCAmelCase ( self : str ):
pass
def _UpperCAmelCase ( self : Any ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
__a = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : int ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self : Optional[int] ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self : int ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 3_97_69, "annotations": target}
# encode them
__a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
__a = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_ )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
__a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_ ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_ )
__a = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_ ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_ ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_ ) )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_ ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_ ) )
@slow
def _UpperCAmelCase ( self : Optional[int] ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
__a = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_ )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_ ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_ )
__a = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_ ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_ ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_ ) )
# verify masks
__a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase_ )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_ ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_ ) )
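
# Shape of the COCO-style detection target consumed above (illustrative):
#   {"image_id": 39769,
#    "annotations": [{"bbox": [x, y, w, h], "category_id": ..., "area": ...,
#                     "iscrowd": 0, "segmentation": [...]}, ...]}
# The panoptic variant instead carries "segments_info" plus a PNG mask file.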
| 197 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 508 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase=12 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=0.0_2 , lowerCamelCase=0 , lowerCamelCase=None , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = projection_dim
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = max_position_embeddings
snake_case__ = initializer_range
snake_case__ = scope
snake_case__ = bos_token_id
def A_ ( self ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
snake_case__ = input_mask.numpy()
snake_case__ , snake_case__ = input_mask.shape
snake_case__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase ):
snake_case__ = 1
snake_case__ = 0
snake_case__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCamelCase )
def A_ ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = TFBlipTextModel(config=lowerCamelCase )
snake_case__ = model(lowerCamelCase , attention_mask=lowerCamelCase , training=lowerCamelCase )
snake_case__ = model(lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
_A : Dict = (TFBlipTextModel,) if is_tf_available() else ()
_A : int = False
_A : Tuple = False
_A : int = False
def A_ ( self ):
snake_case__ = BlipTextModelTester(self )
snake_case__ = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def A_ ( self ):
pass
def A_ ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def A_ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A_ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A_ ( self ):
pass
@slow
def A_ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = TFBlipTextModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def A_ ( self , lowerCamelCase=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCamelCase )
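
# Example invocation (illustrative; the test file path is an assumption):
#   python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -q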
| 711 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
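
# The kernel above implements the (real part of the) Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2))
#             * cos(2 * pi * x' / lambd + psi)
# with the rotated coordinates
#   x' =  x * cos(theta) + y * sin(theta)
#   y' = -x * sin(theta) + y * cos(theta)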
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 530 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : str = """blenderbot-small"""
_lowerCAmelCase : Tuple = ["""past_key_values"""]
_lowerCAmelCase : Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : List[str]=50265 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=8 , lowercase_ : List[str]=2048 , lowercase_ : List[str]=16 , lowercase_ : Tuple=8 , lowercase_ : List[Any]=2048 , lowercase_ : int=16 , lowercase_ : Any=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : List[Any]="gelu" , lowercase_ : List[Any]=512 , lowercase_ : List[str]=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Union[str, Any]=1 , lowercase_ : int=False , lowercase_ : Union[str, Any]=0 , lowercase_ : str=1 , lowercase_ : Any=2 , lowercase_ : int=2 , **lowercase_ : Optional[int] , ):
snake_case_ : Optional[int] = vocab_size
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Optional[Any] = d_model
snake_case_ : List[Any] = encoder_ffn_dim
snake_case_ : List[str] = encoder_layers
snake_case_ : Any = encoder_attention_heads
snake_case_ : List[str] = decoder_ffn_dim
snake_case_ : str = decoder_layers
snake_case_ : Optional[int] = decoder_attention_heads
snake_case_ : Optional[int] = dropout
snake_case_ : int = attention_dropout
snake_case_ : List[str] = activation_dropout
snake_case_ : int = activation_function
snake_case_ : Any = init_std
snake_case_ : Dict = encoder_layerdrop
snake_case_ : Optional[Any] = decoder_layerdrop
snake_case_ : Any = use_cache
snake_case_ : Tuple = encoder_layers
snake_case_ : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
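
# Usage sketch (exposition only; "BlenderbotSmallConfig" is the upstream name
# and an assumption here — the class above is what defines it):
#   cfg = BlenderbotSmallConfig()
#   cfg.hidden_size          # resolved to cfg.d_model via the attribute_map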
class _UpperCAmelCase ( lowerCAmelCase__):
@property
def _snake_case ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_ : Optional[int] = {0: '''batch'''}
snake_case_ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
snake_case_ : str = {0: '''batch''', 1: '''decoder_sequence'''}
snake_case_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ : Dict = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_, snake_case_ : Any = self.num_layers
for i in range(lowercase_ ):
snake_case_ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
snake_case_ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _snake_case ( self : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Optional[Any] = super().outputs
else:
snake_case_ : Optional[Any] = super(lowercase_ , self ).outputs
if self.use_past:
snake_case_, snake_case_ : Any = self.num_layers
for i in range(lowercase_ ):
snake_case_ : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _snake_case ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
snake_case_ : Any = seq_length if not self.use_past else 1
snake_case_ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
snake_case_ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
snake_case_ : List[Any] = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_, snake_case_ : Dict = common_inputs['''input_ids'''].shape
snake_case_ : Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
snake_case_, snake_case_ : int = self.num_attention_heads
snake_case_ : Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : List[Any] = decoder_seq_length + 3
snake_case_ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ : Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
snake_case_ : Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case_, snake_case_ : Optional[Any] = self.num_layers
snake_case_ : Union[str, Any] = min(lowercase_ , lowercase_ )
snake_case_ : str = max(lowercase_ , lowercase_ ) - min_num_layers
snake_case_ : Optional[int] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
snake_case_ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
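    # Shape note for the dummy past_key_values built above (illustrative, not
    # from this file): each entry is a tuple of zero tensors shaped
    #   (batch, num_attention_heads, seq_len, hidden_size // num_attention_heads),
    # where the decoder side uses decoder_seq_length + 3 as its past length.
    # The first min(num_encoder_layers, num_decoder_layers) entries carry four
    # tensors (decoder self-attention plus cross-attention); each remaining
    # layer of the deeper stack gets a single (key, value) pair of zeros.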
def _snake_case ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_, snake_case_ : List[str] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case_ : Optional[int] = seqlen + 2
snake_case_, snake_case_ : Any = self.num_layers
snake_case_, snake_case_ : Optional[int] = self.num_attention_heads
snake_case_ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : Any = common_inputs['''attention_mask'''].dtype
snake_case_ : Tuple = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
snake_case_ : Tuple = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def _snake_case ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ : Dict = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ )
snake_case_ : Optional[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
        # Generate dummy inputs according to the computed batch and sequence sizes
snake_case_ : Dict = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ : str = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def _snake_case ( self : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
elif self.task == "causal-lm":
snake_case_ : int = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
snake_case_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def _snake_case ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Optional[int] = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
snake_case_ : List[str] = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
| 123 |
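# A minimal sketch (not from this file) of how a seq2seq OnnxConfig like the
# one above is typically consumed. The BartOnnxConfig import, checkpoint
# name, and fallback sizes are assumptions based on the transformers ONNX
# export API.
from transformers import AutoTokenizer, BartConfig, TensorType
from transformers.models.bart.configuration_bart import BartOnnxConfig

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
onnx_config = BartOnnxConfig(BartConfig.from_pretrained("facebook/bart-base"), task="seq2seq-lm")

# Dynamic axes (-1) fall back to fixed sizes (batch=2, sequence=8) so the
# ONNX tracer cannot specialize on a particular shape.
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print({name: tuple(tensor.shape) for name, tensor in dummy.items()})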
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowercase__ : str = datasets.logging.get_logger(__name__)
lowercase__ : List[Any] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
lowercase__ : Optional[Any] = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al., 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank that is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
'''
lowercase__ : str = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def __lowercase ( _a , _a , _a=False , _a=False , _a=True , _a=False , _a="dummy_doc" ):
snake_case_ : Union[str, Any] = {doc: key_lines}
snake_case_ : int = {doc: sys_lines}
snake_case_ : Optional[Any] = {}
snake_case_ : Dict = 0
snake_case_ : List[Any] = 0
snake_case_ : str = 0
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
snake_case_, snake_case_ : Dict = reader.get_doc_mentions(_a , key_doc_lines[doc] , _a )
key_singletons_num += singletons_num
if NP_only or min_span:
snake_case_ : List[Any] = reader.set_annotated_parse_trees(_a , key_doc_lines[doc] , _a , _a )
snake_case_, snake_case_ : str = reader.get_doc_mentions(_a , sys_doc_lines[doc] , _a )
sys_singletons_num += singletons_num
if NP_only or min_span:
snake_case_ : Optional[Any] = reader.set_annotated_parse_trees(_a , key_doc_lines[doc] , _a , _a )
if remove_nested:
snake_case_, snake_case_ : Union[str, Any] = reader.remove_nested_coref_mentions(_a , _a )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
snake_case_, snake_case_ : Optional[int] = reader.remove_nested_coref_mentions(_a , _a )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
snake_case_ : List[Any] = reader.get_mention_assignments(_a , _a )
snake_case_ : Optional[Any] = reader.get_mention_assignments(_a , _a )
snake_case_ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def __lowercase ( _a , _a , _a , _a , _a , _a , _a ):
snake_case_ : str = get_coref_infos(_a , _a , _a , _a , _a , _a )
snake_case_ : Any = {}
snake_case_ : List[str] = 0
snake_case_ : Any = 0
for name, metric in metrics:
snake_case_, snake_case_, snake_case_ : Union[str, Any] = evaluator.evaluate_documents(_a , _a , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , f"Recall: {recall * 100:.2f}" , f" Precision: {precision * 100:.2f}" , f" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
snake_case_ : Optional[Any] = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def __lowercase ( _a ):
snake_case_ : Any = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
snake_case_ : List[str] = line.split()[5]
if not parse_col == "-":
snake_case_ : Optional[int] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
def _snake_case ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _snake_case ( self : str , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[Any]=True , lowercase_ : Tuple=False , lowercase_ : Any=False , lowercase_ : List[Any]=False ):
snake_case_ : Optional[int] = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
snake_case_ : str = util.check_gold_parse_annotation(lowercase_ )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
snake_case_ : Optional[Any] = evaluate(
key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , )
return score
| 123 | 1 |
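# A minimal sketch of the CoNLL-score aggregation performed by the scoring
# function above: it sums the MUC, B-cubed, and CEAFe F1 values into `conll`,
# then averages them and scales to a percentage. The F1 numbers below are
# made up for illustration.
f1_values = {"muc": 0.80, "bcub": 0.70, "ceafe": 0.75}
conll_score = sum(f1_values.values()) / 3 * 100
print(f"CoNLL score: {conll_score:.2f}")  # -> CoNLL score: 75.00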
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCamelCase_ = logging.getLogger(__name__)
class __UpperCAmelCase ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "sequence-classification"
def __init__( self , _UpperCAmelCase ):
if type(_UpperCAmelCase ) == dict:
UpperCAmelCase__ : Any = Namespace(**_UpperCAmelCase )
UpperCAmelCase__ : int = glue_output_modes[hparams.task]
UpperCAmelCase__ : Any = glue_tasks_num_labels[hparams.task]
super().__init__(_UpperCAmelCase , _UpperCAmelCase , self.mode )
def lowerCamelCase ( self , **_UpperCAmelCase ):
return self.model(**_UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : Optional[int] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase__ : Union[str, Any] = self(**_UpperCAmelCase )
UpperCAmelCase__ : List[Any] = outputs[0]
UpperCAmelCase__ : str = self.trainer.lr_schedulers[0]['''scheduler''']
UpperCAmelCase__ : Union[str, Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.hparams
UpperCAmelCase__ : str = processors[args.task]()
UpperCAmelCase__ : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCAmelCase__ : List[str] = self._feature_file(_UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _UpperCAmelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase__ : Optional[int] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
UpperCAmelCase__ : Optional[int] = convert_examples_to_features(
_UpperCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , _UpperCAmelCase )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
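        # The branch above implements a simple on-disk feature cache: each GLUE
        # split is converted to features once and persisted with torch.save,
        # and later runs reload the cached file unless --overwrite_cache is set.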
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ):
UpperCAmelCase__ : Tuple = '''dev''' if mode == '''test''' else mode
UpperCAmelCase__ : Any = self._feature_file(_UpperCAmelCase )
logger.info('''Loading features from cached file %s''' , _UpperCAmelCase )
UpperCAmelCase__ : Optional[Any] = torch.load(_UpperCAmelCase )
UpperCAmelCase__ : int = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCAmelCase__ : Tuple = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ : int = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ : int = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , batch_size=_UpperCAmelCase , shuffle=_UpperCAmelCase , )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : Any = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ : Any = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase__ : Union[str, Any] = self(**_UpperCAmelCase )
        UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = outputs[:2]
UpperCAmelCase__ : Union[str, Any] = logits.detach().cpu().numpy()
UpperCAmelCase__ : int = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Optional[Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
UpperCAmelCase__ : List[str] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ : Optional[Any] = np.argmax(_UpperCAmelCase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ : int = np.squeeze(_UpperCAmelCase )
UpperCAmelCase__ : Any = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase__ : int = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : Any = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , _UpperCAmelCase , _UpperCAmelCase )}
UpperCAmelCase__ : Optional[Any] = dict(results.items() )
UpperCAmelCase__ : str = results
return ret, preds_list, out_label_list
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Dict = self._eval_end(_UpperCAmelCase )
UpperCAmelCase__ : Any = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = self._eval_end(_UpperCAmelCase )
UpperCAmelCase__ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_UpperCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''The GLUE task to run''' , )
parser.add_argument(
            '''--gpus''' , default=0 , type=_UpperCAmelCase , help='''The number of GPUs allocated for this; it defaults to 0, meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowerCAmelCase__ ( ) -> Union[str, Any]:
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
add_generic_args(__A , os.getcwd() )
UpperCAmelCase__ : Any = GLUETransformer.add_model_specific_args(__A , os.getcwd() )
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
UpperCAmelCase__ : Tuple = os.path.join(
'''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
UpperCAmelCase__ : str = GLUETransformer(__A )
UpperCAmelCase__ : Any = generic_train(__A , __A )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
UpperCAmelCase__ : Tuple = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__A ) )
UpperCAmelCase__ : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__A )
if __name__ == "__main__":
        main()
| 708 |
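# Hypothetical programmatic invocation of the entry point above. The flags
# --task, --max_seq_length, --gpus, --output_dir, and --data_dir come from
# the parsers referenced in this file; --model_name_or_path is assumed to be
# added by the lightning_base helpers, and all paths are placeholders.
import sys

sys.argv = [
    "run_glue.py",
    "--model_name_or_path", "bert-base-cased",
    "--task", "mrpc",
    "--data_dir", "./glue_data/MRPC",
    "--output_dir", "./results/mrpc",
    "--max_seq_length", "128",
    "--gpus", "0",
]
main()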
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=224 , _UpperCAmelCase=1000 , _UpperCAmelCase=[3, 3, 6, 4] , _UpperCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : int = layer_depths
UpperCAmelCase__ : List[str] = embed_dims
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : List[str] = SwiftFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : str = SwiftFormerForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase__ : Optional[Any] = SwiftFormerForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self ):
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCamelCase ( self ):
UpperCAmelCase__ : Tuple = SwiftFormerModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(
self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCamelCase ( self ):
pass
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_UpperCAmelCase )
UpperCAmelCase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_UpperCAmelCase )
UpperCAmelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = SwiftFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCamelCase ( self ):
pass
def lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase__ : Optional[int] = outputs.hidden_states
UpperCAmelCase__ : Optional[Any] = 8
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_UpperCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
def _config_zero_init(_UpperCAmelCase ):
UpperCAmelCase__ : str = copy.deepcopy(_UpperCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_UpperCAmelCase , _UpperCAmelCase , 1E-10 )
if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) )
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return configs_no_init
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase ( self ):
pass
def lowerCAmelCase__ ( ) -> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self ):
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_UpperCAmelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase__ : List[str] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 599 | 0 |
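# A standalone sketch of the integration-test pattern exercised above: run
# the pretrained checkpoint on the fixture image and compare a slice of the
# logits against hard-coded expected values within a tolerance. Checkpoint
# name, fixture path, and expected values are taken from the test itself.
import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

assert logits.shape == torch.Size((1, 1000))
expected_slice = torch.tensor([-2.1703, 2.1107, -2.0811])
assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)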