from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
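# --- Added usage sketch (not part of the original file) ----------------------
# The checkpoint name and image path below are illustrative assumptions; any
# BLIP checkpoint that uses this processor class should behave the same way.
#
#   from transformers import BlipProcessor
#   from PIL import Image
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   image = Image.open("photo.jpg")  # hypothetical local file
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # `inputs` combines the image processor's pixel_values with the tokenizer's
#   # input_ids/attention_mask, matching the __call__ logic above.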
# ---------------------------------------------------------------------------
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Copy/paste/tweak the original SpeechT5 checkpoint weights into the HF model."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
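# --- Added usage sketch (not part of the original script) --------------------
# Example invocation; the script filename and all paths are placeholders, not
# values taken from the source:
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted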
# ---------------------------------------------------------------------------
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
# ---------------------------------------------------------------------------
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that no edge connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Two-color every connected component, starting each one with color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge connects two same-colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
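# Added illustration (not part of the original file): a triangle contains an
# odd cycle, so it cannot be two-colored and this check should print False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))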
# ---------------------------------------------------------------------------
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
# ---------------------------------------------------------------------------
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean cross-entropy over the label tokens; scaling by the label
        # length recovers the summed negative log-likelihood used as the reference score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
# ---------------------------------------------------------------------------
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
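# --- Added worked example (not part of the original file) --------------------
# With the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the
# product of the strides is 5 * 2**6 = 320, so the property above gives:
#
#   >>> SEWDConfig().inputs_to_logits_ratio
#   320
#
# i.e. every output logit frame corresponds to 320 raw input samples.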
# ---------------------------------------------------------------------------
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original FocalNet checkpoint weights into the HF structure."""
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# ---------------------------------------------------------------------------
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
# ---------------------------------------------------------------------------
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Find the median of the combined contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
# ---------------------------------------------------------------------------
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length by filling in digit pairs
    from the outside in, tracking the carry parity via `remainder`.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
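# Added sanity check (not part of the original file): Project Euler problem 145
# states there are 120 reversible numbers below one thousand, so solution(3)
# should return 120.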
# ---------------------------------------------------------------------------
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str , to_type: str , value: float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
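    # Added illustrative round-trips (hedged, using the table above): 1 kilowatthour is
    # 3_600_000 joule, and 1_000 joule is exactly 1 kilojoule.
    assert energy_conversion("kilowatthour" , "joule" , 1 ) == 3_600_000
    assert energy_conversion("joule" , "kilojoule" , 1_000 ) == 1.0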
| 348 | 1 |
from __future__ import annotations
def depth_first_search(graph: dict , start: str ) -> set[str]:
    """simple docstring"""
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
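# For contrast with the BFS comments inside depth_first_search, an added minimal
# breadth-first variant (illustration only, not part of the original module): pop from
# the front of a queue instead of the back of a stack to visit nodes level by level.
from collections import deque

def breadth_first_search(graph: dict , start: str ) -> set[str]:
    explored , queue = set(start ), deque([start] )
    while queue:
        v = queue.popleft()  # FIFO instead of LIFO
        explored.add(v )
        for adj in graph[v]:
            if adj not in explored:
                queue.append(adj )
    return explored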
| 348 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool ):
"""simple docstring"""
snake_case_ = '''philschmid/bart-large-cnn-samsum'''
snake_case_ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case_ = '''summarizer'''
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = ['''text''']
snake_case_ = ['''text''']
    def encode( self , lowerCamelCase__ ) -> Optional[int]:
        '''simple docstring'''
        return self.pre_processor(lowerCamelCase__ , return_tensors='pt' , truncation=True )
    def forward( self , lowerCamelCase__ ) -> Dict:
        '''simple docstring'''
        return self.model.generate(**lowerCamelCase__ )[0]
    def decode( self , lowerCamelCase__ ) -> Any:
        '''simple docstring'''
        return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=True , clean_up_tokenization_spaces=True )
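# Minimal usage sketch (added for illustration; guarded behind __main__ because it
# downloads the bart-large-cnn-samsum checkpoint on first run). PipelineTool instances
# are callable: __call__ wires encode -> forward -> decode.
if __name__ == "__main__":
    tool = TextSummarizationTool()
    print(tool("Summarizer tools condense a long English passage into a short summary. "
               "This sentence is only here to give the model something to compress." ) )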
| 348 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , bos_token_id=0 , scope=None , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ) -> List[str]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ) -> List[Any]:
        '''simple docstring'''
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest ( TFModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TFBlipTextModel,) if is_tf_available() else ()
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ) -> Tuple:
        '''simple docstring'''
        pass
    def test_training_gradient_checkpointing( self ) -> Dict:
        '''simple docstring'''
        pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ) -> List[str]:
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ) -> List[Any]:
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
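# Standalone illustration (added sketch, not part of the original tests) of the
# attention-mask construction in prepare_config_and_inputs above: each row keeps a
# random-length prefix of ones, so no sequence ends up fully masked.
_demo_mask = np.ones((2, 7) , dtype=np.int64 )
_demo_starts = np.random.randint(1 , 7 - 1 , size=(2,) )
for _row, _start in enumerate(_demo_starts ):
    _demo_mask[_row, :_start] = 1
    _demo_mask[_row, _start:] = 0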
| 348 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> List[Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> str:
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained( self ) -> Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm( self ) -> Dict:
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_no_head( self ) -> int:
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
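# Added sketch of the numeric-regression pattern used in the integration tests above:
# compare a small slice of the model output against frozen reference values within a
# tolerance instead of the full tensor. Toy arrays stand in for real model outputs.
_toy_output = np.array([[0.0208, -0.0356, 0.0237]] , dtype=np.float32 )
_toy_reference = np.array([[0.0208, -0.0356, 0.0237]] , dtype=np.float32 )
assert np.abs(_toy_output - _toy_reference ).max() < 1e-4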
| 348 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__A = data_utils.TransfoXLTokenizer
__A = data_utils.TransfoXLCorpus
__A = data_utils
__A = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ) -> Optional[int]:
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__A = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
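# Added illustration (self-contained toy with hypothetical names) of the unpickling
# trick used at the top of this script: pickle stores dotted import paths, so aliasing
# a legacy module path to a current class lets old pickles deserialize cleanly.
import types

class _Vocab:  # hypothetical stand-in for the tokenizer class
    pass

_legacy = types.ModuleType("legacy_data_utils")
_legacy._Vocab = _Vocab
sys.modules["legacy_data_utils"] = _legacy
_Vocab.__module__ = "legacy_data_utils"  # pickle now records the legacy dotted path
_blob = pickle.dumps(_Vocab() )          # serialized as legacy_data_utils._Vocab
assert isinstance(pickle.loads(_blob ) , _Vocab )  # resolved via the sys.modules alias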
| 348 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
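# Added sketch of what the dummy-object pattern above buys: with a metaclass hook, even
# class-level attribute access funnels into a clear "missing backend" error instead of
# an AttributeError. A minimal reimplementation of the idea (not the real DummyObject):
class _RequiresBackendMeta(type ):
    def __getattr__(cls , key ):
        raise ImportError(f"""{cls.__name__} requires the `sentencepiece` library.""" )

class _DummySentencePieceTokenizer(metaclass=_RequiresBackendMeta ):
    pass

try:
    _DummySentencePieceTokenizer.from_pretrained  # attribute access triggers the error
except ImportError as err:
    assert "sentencepiece" in str(err )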
| 348 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
pass
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
pass
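# Added toy reimplementation (illustration only, not the real implementation in
# datasets.utils.patching) of the core behaviour these tests exercise: temporarily
# swap an attribute on a target module, then restore the original on exit.
import contextlib

@contextlib.contextmanager
def _toy_patch(module , attr , replacement ):
    original = getattr(module , attr )
    setattr(module , attr , replacement )
    try:
        yield
    finally:
        setattr(module , attr , original )

with _toy_patch(_test_patching , 'open' , '__toy_mock__' ):
    assert _test_patching.open == '__toy_mock__'
assert _test_patching.open is open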
| 348 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
        generator = np.random.RandomState(lowerCamelCase__ )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
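# Added sketch (commented, since it needs a GPU build of onnxruntime and a local model
# file) of how the provider/session knobs above map onto a raw InferenceSession:
#
#     import onnxruntime as ort
#     options = ort.SessionOptions()
#     options.enable_mem_pattern = False
#     providers = [(
#         "CUDAExecutionProvider",
#         {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
#     )]
#     session = ort.InferenceSession("model.onnx", sess_options=options, providers=providers)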
| 348 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''pixel_values''']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = size if size is not None else {'shortest_edge': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowerCamelCase = do_convert_rgb
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='size' , default_to_square=lowerCamelCase__ )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' , default_to_square=lowerCamelCase__ )
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCamelCase = [convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
__lowerCamelCase = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
__lowerCamelCase = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
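# Added sketch of the numeric core of the preprocessing pipeline above (rescale, then
# per-channel normalize), in plain numpy so it runs without PIL. Values illustrative.
_demo_image = np.random.randint(0 , 256 , size=(224, 224, 3) ).astype(np.float32 )
_demo_image = _demo_image * (1 / 255 )  # rescale step
_demo_image = (_demo_image - np.array(OPENAI_CLIP_MEAN )) / np.array(OPENAI_CLIP_STD )  # normalize step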
| 348 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
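# Added usage sketch (commented and hedged; assumes an installed transformers package,
# whose WhisperConfig this module defines):
#
#     from transformers import WhisperConfig, WhisperForConditionalGeneration
#     config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#     model = WhisperForConditionalGeneration(config)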
| 348 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A = True
except ImportError:
__A = False
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace ) -> List[str]:
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ) -> Union[str, Any]:
        '''simple docstring'''
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ) -> List[Any]:
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
__lowerCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json', 'r') as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"""{directory}/configuration.json""")
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, 'r') as f:
                lines = f.readlines()
            with open(path, 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
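        # The generated `to_replace_*.py` template is expected to use markers of roughly this
        # shape (an illustrative sketch inferred from the parser below, not a verbatim template):
        #
        #   # To replace in: "src/transformers/models/auto/configuration_auto.py"
        #   # Below: "# Add configs here"
        #   # Replace with:
        #   ("new_model", "NewModelConfig"),
        #   # End.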
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory)
| 348 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
        max_decoder_length = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
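        # the pass above pre-fills the cache with every token but the last; the single-token pass
        # below reuses those past_key_values, and both must agree with a full, uncached forward pass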
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
        max_decoder_length = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
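                # in the upstream test this loop zeroes the attention mask before a random start
                # index (left padding) for both the PyTorch and Flax inputs, so the cross-framework
                # comparison also covers padded positions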
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 348 | 1 |
def join(separator: str, separated: list[str]) -> str:
    """Join the strings in `separated` with `separator`, mirroring str.join."""
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('join() accepts only strings to be joined')
        joined += word_or_phrase + separator
    return joined.strip(separator)
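# A minimal usage sketch (illustrative values):
#   join('-', ['a', 'b', 'c'])  returns 'a-b-c'
#   join(' ', ['You', 'are', 'amazing!'])  returns 'You are amazing!'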
if __name__ == "__main__":
from doctest import testmod
testmod()
| 348 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
        height = 12
        width = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
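        # the dummy 2D transformer attends over a height x width (12 x 12) grid of discrete
        # latents, which is why `attention_head_dim` is `height * width` and `sample_size` is
        # the grid width (an interpretation of the config above, not taken from the source)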
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 348 | 1 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling with window `size` and step `stride` to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
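    # e.g. a 6x6 input with size=3 and stride=2 yields (6 - 3) // 2 + 1 = 2, i.e. a 2x2 output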
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # take the maximum of the current pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by `stride` columns
            j += stride
            mat_j += 1
        # shift the pooling window by `stride` rows
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling with window `size` and step `stride` to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
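    # e.g. a 5x5 input with size=2 and stride=1 yields (5 - 2) // 1 + 1 = 4, i.e. a 4x4 output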
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # take the (truncated) average of the current pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by `stride` columns
            j += stride
            mat_j += 1
        # shift the pooling window by `stride` rows
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 348 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
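            # e.g. with min_size = max_size = 32 * 8 = 256, the predicted masks are 64 x 64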
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = GPTaTokenizer
snake_case_ = GPTaTokenizerFast
snake_case_ = True
snake_case_ = {'''add_prefix_space''': True}
snake_case_ = False
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowerCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
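        # '\u0120' (Ġ) is the byte-level BPE marker GPT-2 uses for a leading space; after the
        # '#version' header, each merges entry is one space-separated merge pair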
__lowerCamelCase = {'unk_token': '<unk>'}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def lowercase_ ( self , **lowerCamelCase__ ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = 'lower newer'
__lowerCamelCase = 'lower newer'
return input_text, output_text
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase = 'lower newer'
__lowerCamelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase__ )
__lowerCamelCase = 'lower newer'
# Testing tokenization
__lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing conversion to ids without special tokens
__lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing conversion to ids with special tokens
__lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase__ )
__lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing the unknown token
__lowerCamelCase = tokens + [rust_tokenizer.unk_token]
__lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowercase_ ( self , lowerCamelCase__=15 ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
# Simple input
__lowerCamelCase = 'This is a simple input'
__lowerCamelCase = ['This is a simple input 1', 'This is a simple input 2']
__lowerCamelCase = ('This is a simple input', 'This is a pair')
__lowerCamelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' )
# Simple input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' )
# Simple input
self.assertRaises(
lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' , )
# Pair input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' )
# Pair input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' )
# Pair input
self.assertRaises(
lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' , )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__lowerCamelCase = 'This is a simple input'
__lowerCamelCase = ['This is a simple input looooooooong', 'This is a simple input']
__lowerCamelCase = ('This is a simple input', 'This is a pair')
__lowerCamelCase = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__lowerCamelCase = tokenizer.pad_token_id
__lowerCamelCase = tokenizer(lowerCamelCase__ , padding='max_length' , max_length=30 , return_tensors='np' )
__lowerCamelCase = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncate=lowerCamelCase__ , return_tensors='np' )
__lowerCamelCase = tokenizer(*lowerCamelCase__ , padding='max_length' , max_length=60 , return_tensors='np' )
__lowerCamelCase = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncate=lowerCamelCase__ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = '$$$'
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase__ , add_bos_token=lowerCamelCase__ )
__lowerCamelCase = 'This is a simple input'
__lowerCamelCase = ['This is a simple input 1', 'This is a simple input 2']
__lowerCamelCase = tokenizer.bos_token_id
__lowerCamelCase = tokenizer(lowerCamelCase__ )
__lowerCamelCase = tokenizer(lowerCamelCase__ )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCamelCase = tokenizer.decode(out_s.input_ids )
__lowerCamelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Any:
'''simple docstring'''
# TODO: change to self.get_tokenizers() when the fast version is implemented
__lowerCamelCase = [self.get_tokenizer(do_lower_case=lowerCamelCase__ , add_bos_token=lowerCamelCase__ )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase = 'Encode this.'
__lowerCamelCase = 'This one too please.'
__lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
encoded_sequence += tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__lowerCamelCase = tokenizer.encode_plus(
lowerCamelCase__ , lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , )
__lowerCamelCase = encoded_sequence_dict['input_ids']
__lowerCamelCase = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
__lowerCamelCase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase__ )
]
__lowerCamelCase = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__lowerCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowerCamelCase__ )
__lowerCamelCase = 'A photo of a cat'
__lowerCamelCase = tokenizer.encode(
lowerCamelCase__ , )
self.assertEqual(lowerCamelCase__ , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('test_opt' )
__lowerCamelCase = AutoTokenizer.from_pretrained('./test_opt' )
__lowerCamelCase = tokenizer.encode(
lowerCamelCase__ , )
self.assertEqual(lowerCamelCase__ , [2, 250, 1_345, 9, 10, 4_758] )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=lowerCamelCase__ )
__lowerCamelCase = 'A photo of a cat'
__lowerCamelCase = tokenizer.encode(
lowerCamelCase__ , )
# Same as above
self.assertEqual(lowerCamelCase__ , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowerCamelCase__ )
__lowerCamelCase = 'bos'
__lowerCamelCase = tokenizer.get_vocab()['bos']
__lowerCamelCase = 'A photo of a cat'
__lowerCamelCase = tokenizer.encode(
lowerCamelCase__ , )
# We changed the bos token
self.assertEqual(lowerCamelCase__ , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('./tok' )
__lowerCamelCase = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__lowerCamelCase = tokenizer.encode(
lowerCamelCase__ , )
self.assertEqual(lowerCamelCase__ , [31_957, 250, 1_345, 9, 10, 4_758] )
| 348 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
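    # `attribute_map` lets generic accessors such as `config.hidden_size` resolve to `hidden_dim`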
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
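# Illustrative usage (a sketch; `Mask2FormerConfig` is the upstream public name for
# this class, which is an assumption here). With no arguments, the config falls back
# to the default Swin backbone configured above:
#
#     >>> config = Mask2FormerConfig()
#     >>> config.backbone_config.model_type
#     'swin'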
| 348 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> int:
"""simple docstring"""
__lowerCamelCase = abs(UpperCamelCase__ )
__lowerCamelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> int:
"""simple docstring"""
__lowerCamelCase = abs(UpperCamelCase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> int:
"""simple docstring"""
return sum(int(c ) for c in str(abs(UpperCamelCase__ ) ) )
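# Quick equivalence check (illustrative; the names follow the benchmark below, which
# refers to these three implementations as sum_of_digits, sum_of_digits_recursion and
# sum_of_digits_compact). All three should agree, including on negative inputs:
#
#     >>> sum_of_digits(-12_345), sum_of_digits_recursion(-12_345), sum_of_digits_compact(-12_345)
#     (15, 15, 15)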
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase__ : Callable , UpperCamelCase__ : int ) -> None:
__lowerCamelCase = F"""{func.__name__}({value})"""
__lowerCamelCase = timeit(F"""__main__.{call}""" , setup='import __main__' )
print(F"""{call:56} = {func(UpperCamelCase__ )} -- {timing:.4f} seconds""" )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 348 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
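# Note (illustrative): in the upstream PriorTransformer this method is called
# `post_process_latents`. It undoes the CLIP-statistics normalisation applied during
# training, i.e. latents that were scaled with (x - clip_mean) / clip_std are mapped
# back to the original CLIP embedding space.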
| 348 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = len(UpperCamelCase__ ) - 1
while left <= right:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
__lowerCamelCase = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__lowerCamelCase = left
__lowerCamelCase = point
elif point > right:
__lowerCamelCase = right
__lowerCamelCase = point
else:
if item < current_item:
__lowerCamelCase = point - 1
else:
__lowerCamelCase = point + 1
return None
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__ , UpperCamelCase__ , point + 1 , UpperCamelCase__ )
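# Illustrative call of the recursive variant above (arguments: sorted collection,
# item, left bound, right bound; indices are 0-based):
#
#     >>> interpolation_search_by_recursion([10, 30, 40, 45, 50, 66, 77, 93], 45, 0, 7)
#     3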
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> str:
"""simple docstring"""
if collection != sorted(UpperCamelCase__ ):
raise ValueError('Collection must be sorted in ascending order' )
return True
if __name__ == "__main__":
import sys
__A = 0
if debug == 1:
__A = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
__A = 67
__A = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at position: {result}''')
else:
print("Not found")
| 348 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum distance from each vertex not yet in the tree to its nearest
# neighbor inside the partial tree built so far
__lowerCamelCase = [] # heap of distances from vertices to their nearest tree neighbor
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
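# Illustrative run (assuming the defaultdict(list) adjacency format built in the
# __main__ block below, where each entry is a [neighbor, weight] pair). For the
# triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), the minimum spanning tree edges are:
#
#     >>> graph = defaultdict(list)
#     >>> for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#     ...     graph[u].append([v, w]); graph[v].append([u, w])
#     >>> prisms_algorithm(graph)
#     [(0, 1), (1, 2)]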
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = "The Nymphenburg Palace is a beautiful palace in Munich!"
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
__lowerCamelCase = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
__lowerCamelCase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__lowerCamelCase = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=UpperCamelCase__ , output_all_encodings=UpperCamelCase__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , UpperCamelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__lowerCamelCase = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__lowerCamelCase = os.path.join(get_home_dir() , 'models' )
__lowerCamelCase = _load_vocab(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , cls=UpperCamelCase__ )
__lowerCamelCase = nlp.model.BERTModel(
UpperCamelCase__ , len(UpperCamelCase__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=UpperCamelCase__ , use_token_type_embed=UpperCamelCase__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=UpperCamelCase__ , use_decoder=UpperCamelCase__ , )
original_bort.load_parameters(UpperCamelCase__ , cast_dtype=UpperCamelCase__ , ignore_extra=UpperCamelCase__ )
__lowerCamelCase = original_bort._collect_params_with_prefix()
# Build our config 🤗
__lowerCamelCase = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(UpperCamelCase__ ),
}
__lowerCamelCase = BertConfig.from_dict(UpperCamelCase__ )
__lowerCamelCase = BertForMaskedLM(UpperCamelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNet arrays to PyTorch parameters
def to_torch(UpperCamelCase__ : Dict ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
__lowerCamelCase = hf_param.shape
__lowerCamelCase = to_torch(params[gluon_param] )
__lowerCamelCase = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__lowerCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__lowerCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__lowerCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__lowerCamelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__lowerCamelCase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__lowerCamelCase = hf_bort_model.bert.encoder.layer[i]
# self attention
__lowerCamelCase = layer.attention.self
__lowerCamelCase = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__lowerCamelCase = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__lowerCamelCase = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__lowerCamelCase = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__lowerCamelCase = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__lowerCamelCase = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__lowerCamelCase = layer.attention.output
__lowerCamelCase = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__lowerCamelCase = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__lowerCamelCase = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__lowerCamelCase = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__lowerCamelCase = layer.intermediate
__lowerCamelCase = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__lowerCamelCase = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__lowerCamelCase = layer.output
__lowerCamelCase = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__lowerCamelCase = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__lowerCamelCase = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__lowerCamelCase = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__lowerCamelCase = RobertaTokenizer.from_pretrained('roberta-base' )
__lowerCamelCase = tokenizer.encode_plus(UpperCamelCase__ )['input_ids']
# Get gluon output
__lowerCamelCase = mx.nd.array([input_ids] )
__lowerCamelCase = original_bort(inputs=UpperCamelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = BertModel.from_pretrained(UpperCamelCase__ )
hf_bort_model.eval()
__lowerCamelCase = tokenizer.encode_plus(UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = hf_bort_model(**UpperCamelCase__ )[0]
__lowerCamelCase = output_gluon[0].asnumpy()
__lowerCamelCase = output_hf[0].detach().numpy()
__lowerCamelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__lowerCamelCase = np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
if success:
print('✔️ Both models output the same tensors' )
else:
print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , UpperCamelCase__ )
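# Note (illustrative): the check above tokenises SAMPLE_TEXT with the RoBERTa
# tokenizer, runs both the original Gluon model and the converted PyTorch model, and
# requires the final hidden states to agree within atol=1e-3; the fp16 cast from
# `.half()` above is why an exact match is not expected.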
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 348 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def lowercase_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
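# Illustrative usage (a sketch; upstream this class is datasets'
# QuestionAnsweringExtractive, with fields question_column / context_column /
# answers_column, which is an assumption about the obfuscated names above):
#
#     >>> task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#     >>> task.column_mapping
#     {'query': 'question', 'passage': 'context', 'answers': 'answers'}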
| 348 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 348 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 348 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
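# Illustrative usage (a sketch using the public `datasets` API names; with two
# equal-length in-memory datasets and the default "first_exhausted" strategy, rows
# simply alternate between the sources):
#
#     >>> from datasets import Dataset
#     >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     >>> interleave_datasets([d1, d2])["a"]
#     [0, 10, 1, 11, 2, 12]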
| 348 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
snake_case_ = '''pixel_values'''
snake_case_ = False
snake_case_ = TimmBackboneConfig
def __init__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , 'timm' )
super().__init__(lowerCamelCase__ )
__lowerCamelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCamelCase = getattr(lowerCamelCase__ , 'use_pretrained_backbone' , lowerCamelCase__ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCamelCase = config.out_indices if getattr(lowerCamelCase__ , 'out_indices' , lowerCamelCase__ ) is not None else (-1,)
__lowerCamelCase = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCamelCase = self._backbone.return_layers
__lowerCamelCase = {layer['module']: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCamelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCamelCase = kwargs.pop('use_timm_backbone' , lowerCamelCase__ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCamelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCamelCase = kwargs.pop('features_only' , config.features_only )
__lowerCamelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCamelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCamelCase = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
pass
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCamelCase = self._all_layers
__lowerCamelCase = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = self._return_layers
__lowerCamelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCamelCase = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = None
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
__lowerCamelCase = (feature_maps,)
if output_hidden_states:
__lowerCamelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
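# Hedged usage sketch (illustrative, not part of the original file). The public
# class name `TimmBackbone` and the checkpoint name "resnet18" are assumptions:
#
#   import torch
#   from transformers import TimmBackbone
#
#   backbone = TimmBackbone.from_pretrained("resnet18", use_pretrained_backbone=False)
#   pixel_values = torch.randn(1, 3, 224, 224)
#   feature_maps = backbone(pixel_values, return_dict=True).feature_maps
#   print([fm.shape for fm in feature_maps])  # one feature map per out_indices entry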
| 348 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
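# Hedged example of the renaming above: a fairseq key such as
# "transformer.layers.0.linear1.weight" becomes
# "model.decoder.layers.0.fc1.weight" once the substitutions run in order.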
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
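# Hedged CLI sketch (illustrative; the script name and output path below are
# placeholders, not taken from the original):
#
#   python convert_musicgen_checkpoint.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small-converted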
| 348 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
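# Note: when torch/transformers are unavailable, only dummy placeholder classes
# are exposed above, so importing this subpackage still succeeds.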
| 348 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = position_buckets
__lowerCamelCase = share_att_key
__lowerCamelCase = relative_attention
__lowerCamelCase = norm_rel_ebd
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = feature_layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) """
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
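# Hedged note (illustrative): the property above multiplies out `conv_stride`;
# with the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) it evaluates
# to 5 * 2**4 = 80, i.e. one output frame per ~80 input samples.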
| 348 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> list:
"""simple docstring"""
__lowerCamelCase = int(UpperCamelCase__ )
if n_element < 1:
        __lowerCamelCase = ValueError('n_element should be a positive number' )
raise my_error
__lowerCamelCase = [1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = (0, 0, 0)
__lowerCamelCase = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
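# Hedged sanity check (illustrative, added for clarity): the first ten Hamming
# numbers, in increasing order, as produced by the function above.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]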
if __name__ == "__main__":
__A = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
__A = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 348 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
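# Hedged examples of the matching rules above: "text_encoder_prenet.*" ignores
# any name starting with "text_encoder_prenet.", while
# "encoder.layers.*.norm_k.weight" ignores e.g. "encoder.layers.3.norm_k.weight"
# (both the prefix and the suffix must occur in the name).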
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
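# Hedged CLI sketch (illustrative; the script name and file paths below are
# placeholders, not taken from the original):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts_fairseq.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf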
| 348 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = np.random.RandomState(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
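        # hedged: in the de-obfuscated test this disables onnxruntime's
        # memory-pattern optimization, which aids reproducibility on GPU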
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 348 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex in U to a vertex
# in V (or, equivalently, a vertex in V to one in U). In other words, no edge
# joins two vertices of the same set.
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = [False] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ : int , UpperCamelCase__ : int ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__ , 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__ , 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
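# Hedged counter-example (illustrative): a triangle contains an odd cycle, so it
# is not bipartite and the check below prints False.
__A = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(graph))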
| 348 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''char'''
snake_case_ = '''bpe'''
snake_case_ = '''wp'''
__A = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''image_processor''', '''char_tokenizer''']
snake_case_ = '''ViTImageProcessor'''
snake_case_ = '''MgpstrTokenizer'''
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCamelCase__ , )
__lowerCamelCase = kwargs.pop('feature_extractor' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
__lowerCamelCase = self.char_tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase = encodings['input_ids']
return inputs
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = sequences
__lowerCamelCase = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'char' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'bpe' )
__lowerCamelCase , __lowerCamelCase = self._decode_helper(lowerCamelCase__ , 'wp' )
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(lowerCamelCase__ ):
__lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase = scores.index(max(lowerCamelCase__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase = {}
__lowerCamelCase = final_strs
__lowerCamelCase = final_scores
__lowerCamelCase = char_strs
__lowerCamelCase = bpe_strs
__lowerCamelCase = wp_strs
return out
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
__lowerCamelCase = self.char_decode
__lowerCamelCase = 1
__lowerCamelCase = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase = self.bpe_decode
__lowerCamelCase = 2
__lowerCamelCase = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase = self.wp_decode
__lowerCamelCase = 102
__lowerCamelCase = '[SEP]'
else:
raise ValueError(f"""Format {format} is not supported.""" )
__lowerCamelCase , __lowerCamelCase = [], []
__lowerCamelCase = pred_logits.size(0 )
__lowerCamelCase = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase__ , sorted=lowerCamelCase__ )
__lowerCamelCase = preds_index.view(-1 , lowerCamelCase__ )[:, 1:]
__lowerCamelCase = decoder(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = torch.nn.functional.softmax(lowerCamelCase__ , dim=2 ).max(dim=2 )
__lowerCamelCase = preds_max_prob[:, 1:]
for index in range(lowerCamelCase__ ):
__lowerCamelCase = preds_str[index].find(lowerCamelCase__ )
__lowerCamelCase = preds_str[index][:pred_eos]
__lowerCamelCase = preds_index[index].cpu().tolist()
__lowerCamelCase = pred_index.index(lowerCamelCase__ ) if eos_token in pred_index else -1
__lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase__ )
conf_scores.append(lowerCamelCase__ )
return dec_strs, conf_scores
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase__ )]
return decode_strs
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase__ )]
return decode_strs
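# Hedged usage sketch (illustrative; the attribute and key names below are
# assumptions about the de-obfuscated API, not confirmed by this file):
#
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   decoded = processor.batch_decode(outputs.logits)
#   print(decoded["generated_text"])  # best string per sample across the 3 heads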
| 348 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowerCamelCase = model(input_ids.to(lowerCamelCase__ ) , labels=labels.to(lowerCamelCase__ ) ).loss
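        # convert the mean token-level cross-entropy into the total log-likelihood
        # of the label sequence, which is what the reference score below measures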
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = 3
__lowerCamelCase = (32, 32)
__lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase__ )
return image
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
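        # hedged: stands in for a CLIP feature extractor in these unit tests,
        # returning an object whose `pixel_values` is an empty tensor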
def extract(*lowerCamelCase__ , **lowerCamelCase__ ):
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = torch.ones([0] )
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
self.pixel_values.to(lowerCamelCase__ )
return self
return Out()
return extract
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase__ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase__ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(pipe.scheduler , lowerCamelCase__ )
assert pipe.safety_checker is None
__lowerCamelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCamelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
__lowerCamelCase = unet.half()
__lowerCamelCase = vae.half()
__lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A painting of a squirrel eating a burger'
__lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase__ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__lowerCamelCase = 4_003_660_346
__lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase__ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'padme amidala taking a bath artwork, safe for work, no nudity'
__lowerCamelCase = 2_734_971_755
__lowerCamelCase = 7
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
__lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__lowerCamelCase = 1_044_355_234
__lowerCamelCase = 12
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 348 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
__lowerCamelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCamelCase = [4, 4, 4, 4]
__lowerCamelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
else:
__lowerCamelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCamelCase = 96
elif "small" in model_name:
__lowerCamelCase = 96
elif "base" in model_name:
__lowerCamelCase = 128
elif "large" in model_name:
__lowerCamelCase = 192
elif "xlarge" in model_name:
__lowerCamelCase = 256
elif "huge" in model_name:
__lowerCamelCase = 352
# set label information
__lowerCamelCase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCamelCase = 'imagenet-22k-id2label.json'
else:
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
    # fmt: off
    __lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
        do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
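# Example invocation (the script filename is hypothetical; the flags match the
# argparse definitions above):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny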
| 348 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=99 , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=9 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__=8 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_02 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0 , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def lowercase_ ( self ) -> str:
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase__ )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = self.get_config()
__lowerCamelCase = config.num_attention_heads
__lowerCamelCase = self.prepare_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, input_dict
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , )
__lowerCamelCase = model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
__lowerCamelCase = result.last_hidden_state
__lowerCamelCase = result.past_key_values
__lowerCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).get_decoder().to(lowerCamelCase__ ).eval()
# first forward pass
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
__lowerCamelCase , __lowerCamelCase = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new token to input_ids
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['last_hidden_state']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).half().eval()
__lowerCamelCase = model(**lowerCamelCase__ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(lowerCamelCase__ ).any().item() )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case_ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = True
snake_case_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case_ = [0.8, 0.9]
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=lowerCamelCase__ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs[0]
__lowerCamelCase = UMTaForConditionalGeneration(lowerCamelCase__ ).eval()
model.to(lowerCamelCase__ )
__lowerCamelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase__ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
}
for attn_name, (name, mask) in zip(lowerCamelCase__ , head_masking.items() ):
__lowerCamelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ )
__lowerCamelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase__ , return_dict_in_generate=lowerCamelCase__ , **lowerCamelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=lowerCamelCase__ , legacy=lowerCamelCase__ )
__lowerCamelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ ).input_ids
# fmt: off
__lowerCamelCase = torch.tensor(
[
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model.generate(input_ids.to(lowerCamelCase__ ) )
__lowerCamelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 348 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float] , UpperCamelCase__ : list[float] ) -> float:
"""simple docstring"""
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase , __lowerCamelCase = divmod(len(UpperCamelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
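    # Standalone worked example of the median logic above, with explicit names:
    # merging [1, 3] and [2, 4] gives [1, 2, 3, 4]; with an even count the median
    # averages the two middle values, so (2 + 3) / 2 == 2.5.
    def _median_of_two(numsa: list, numsb: list) -> float:
        merged = sorted(numsa + numsb)
        div, mod = divmod(len(merged), 2)
        return merged[div] if mod == 1 else (merged[div] + merged[div - 1]) / 2
    assert _median_of_two([1.0, 3.0], [2.0, 4.0]) == 2.5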
| 348 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
__lowerCamelCase = number_of_bytes // partitions
__lowerCamelCase = []
for i in range(UpperCamelCase__ ):
__lowerCamelCase = i * bytes_per_partition + 1
__lowerCamelCase = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
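    # Standalone worked example of the partitioning logic above, with explicit
    # names: 100 bytes over 3 partitions gives 33 bytes to each of the first two
    # partitions and the remainder to the last one.
    def _allocate(number_of_bytes: int, partitions: int) -> list:
        per_partition = number_of_bytes // partitions
        return [
            f"{i * per_partition + 1}-{number_of_bytes if i == partitions - 1 else (i + 1) * per_partition}"
            for i in range(partitions)
        ]
    assert _allocate(100, 3) == ["1-33", "34-66", "67-100"]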
| 348 |
__A = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.3_5_5_8_1_8,
}
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowerCamelCase = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(UpperCamelCase__ )}"""
)
raise ValueError(UpperCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
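    # Standalone worked example of the conversion above, with explicit names:
    # every unit in the table is expressed in joules, so converting multiplies by
    # the source factor and divides by the target factor.
    def _convert(from_type: str, to_type: str, value: float) -> float:
        return value * __A[from_type] / __A[to_type]
    assert _convert("kilowatthour", "joule", 1) == 3_600_000.0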
| 348 | 1 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__lowerCamelCase = deepcopy(lowerCamelCase__ )
elif os.path.exists(lowerCamelCase__ ):
with io.open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = json.load(lowerCamelCase__ )
else:
try:
__lowerCamelCase = baseaa.urlsafe_baadecode(lowerCamelCase__ ).decode('utf-8' )
__lowerCamelCase = json.loads(lowerCamelCase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
__lowerCamelCase = config
self.set_stage_and_offload()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__lowerCamelCase = self.get_value('zero_optimization.stage' , -1 )
# offload
__lowerCamelCase = False
if self.is_zeroa() or self.is_zeroa():
__lowerCamelCase = set(['cpu', 'nvme'] )
__lowerCamelCase = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__lowerCamelCase = True
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = self.config
# find the config node of interest if it exists
__lowerCamelCase = ds_key_long.split('.' )
__lowerCamelCase = nodes.pop()
for node in nodes:
__lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.find_config_node(lowerCamelCase__ )
if config is None:
return default
return config.get(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.config
# find the config node of interest if it exists
__lowerCamelCase = ds_key_long.split('.' )
for node in nodes:
__lowerCamelCase = config
__lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else bool(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else not bool(lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
return self._stage == 2
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._stage == 3
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return self._offload
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = engine
def lowercase_ ( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
# runs backpropagation and handles mixed precision
self.engine.backward(lowerCamelCase__ , **lowerCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , device_placement=lowerCamelCase__ , scaler=lowerCamelCase__ )
__lowerCamelCase = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self , lowerCamelCase__=None ) -> Tuple:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=0.0_01 , lowerCamelCase__=0 , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = params
__lowerCamelCase = lr
__lowerCamelCase = weight_decay
__lowerCamelCase = kwargs
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=0 , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = optimizer
__lowerCamelCase = total_num_steps
__lowerCamelCase = warmup_num_steps
__lowerCamelCase = kwargs
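# Minimal standalone sketch (hypothetical helper) of the dotted-path lookup that
# get_value()/find_config_node() above perform on the DeepSpeed config dict.
def _dotted_get(config: dict, ds_key_long: str, default=None):
    node = config
    for key in ds_key_long.split("."):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

assert _dotted_get({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") == 3
assert _dotted_get({}, "zero_optimization.stage", default=-1) == -1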
| 348 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''philschmid/bart-large-cnn-samsum'''
snake_case_ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case_ = '''summarizer'''
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = ['''text''']
snake_case_ = ['''text''']
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ , return_tensors='pt' , truncation=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self.model.generate(**lowerCamelCase__ )[0]
def lowercase_ ( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
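# Hedged usage sketch: a PipelineTool runs encode -> forward -> decode when called.
# Instantiating the tool above triggers a checkpoint download, so the calls are
# left commented out here.
# tool = __lowerCAmelCase()  # the summarizer tool defined above
# print(tool('A long English article to condense into a few sentences ...'))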
| 348 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=10 ) -> int:
"""simple docstring"""
__lowerCamelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any]=10 ) -> str:
"""simple docstring"""
__lowerCamelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(UpperCamelCase__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
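# Illustrative standalone sketch (hypothetical helper) of the multiplier behind
# get_linear_schedule_with_warmup: linear warmup to the base lr, then linear decay
# to zero at num_training_steps. With warmup=2, total=10 and base lr 10.0 this
# reproduces the expected values asserted further below (0.0, 5.0, 10.0, 8.75, ...).
def _linear_warmup_decay(step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert _linear_warmup_decay(1, 2, 10) == 0.5    # warmup phase: step / num_warmup_steps
assert _linear_warmup_decay(3, 2, 10) == 0.875  # decay phase: becomes 8.75 once scaled by lr=10.0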
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertAlmostEqual(lowerCamelCase__ , lowerCamelCase__ , delta=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
__lowerCamelCase = criterion(lowerCamelCase__ , lowerCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase__ , weight_decay=0.0 , relative_step=lowerCamelCase__ , scale_parameter=lowerCamelCase__ , warmup_init=lowerCamelCase__ , )
for _ in range(1_000 ):
__lowerCamelCase = criterion(lowerCamelCase__ , lowerCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = nn.Linear(50 , 50 ) if is_torch_available() else None
    snake_case_ = AdamW(snake_case_.parameters() , lr=1_0.0 ) if is_torch_available() else None
snake_case_ = 10
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertAlmostEqual(lowerCamelCase__ , lowerCamelCase__ , delta=lowerCamelCase__ , msg=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__lowerCamelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
__lowerCamelCase , __lowerCamelCase = data
__lowerCamelCase = scheduler_func(self.optimizer , **lowerCamelCase__ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowerCamelCase = unwrap_schedule(lowerCamelCase__ , self.num_steps )
self.assertListAlmostEqual(
lowerCamelCase__ , lowerCamelCase__ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
__lowerCamelCase = scheduler_func(self.optimizer , **lowerCamelCase__ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase__ ) # wrap to test picklability of the schedule
__lowerCamelCase = unwrap_and_save_reload_schedule(lowerCamelCase__ , self.num_steps )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ , msg=f"""failed for {scheduler_func} in save and reload""" )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = fn
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
return self.fn(*lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = list(map(self , scheduler.lr_lambdas ) )
| 348 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 348 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPFeatureExtractor"]
__A = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
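# Minimal standalone sketch of the lazy-import idea behind _LazyModule above:
# a name resolves to its submodule only on first attribute access, keeping the
# top-level package import cheap. (Illustrative; the real _LazyModule in
# transformers.utils also receives the file path and module spec, as seen above.)
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")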
| 348 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''camembert-base''': 5_12,
}
SPIECE_UNDERLINE = '''▁'''
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ) -> Tuple:
        '''simple docstring'''
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
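# Minimal usage sketch (comments only; assumes the fast tokenizer class above is exported
# under its real name, CamembertTokenizerFast):
#   tokenizer = CamembertTokenizerFast.from_pretrained('camembert-base')
#   input_ids = tokenizer('Le camembert est délicieux !')['input_ids']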
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self , seed=0 ) -> dict:
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
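    # Each test below instantiates the tiny ONNX img2img pipeline on CPU, optionally swaps
    # in a different scheduler, and compares a 3x3 corner slice of the output image against
    # hard-coded reference values with a loose 1e-1 tolerance, since ONNX CPU execution is
    # only approximately reproducible.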
    def test_pipeline_default( self ) -> Union[str, Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ) -> Tuple:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ) -> Optional[Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ) -> str:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ) -> List[str]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ) -> List[Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ) -> Tuple:
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ) -> Any:
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ) -> int:
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 348 | 0 |
def lowerCamelCase_ ( input_str : str , use_pascal : bool = False ) -> str:
    """simple docstring"""
    if not isinstance(input_str , str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = """""" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
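# Ids of non-speech tokens (punctuation and special symbols) that Whisper suppresses at
# generation time; the first list targets the English-only checkpoints, the second the
# multilingual ones.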
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''whisper'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=51_865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1_536 , encoder_ffn_dim=1_536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50_257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1_500 , max_target_positions=448 , pad_token_id=50_256 , bos_token_id=50_256 , eos_token_id=50_256 , suppress_tokens=None , begin_suppress_tokens=[220, 50_256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ) -> str:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class __lowerCAmelCase ( OnnxSeqaSeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 22_050 , time_duration = 5.0 , frequency = 220 , ) -> Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-3
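# Minimal usage sketch (comments only; assumes the first class above is exported as
# WhisperConfig):
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   assert config.hidden_size == config.d_model  # resolved through attribute_map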
| 348 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
    def get_tokenizer( self , **kwargs ) -> List[str]:
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[int]:
        '''simple docstring'''
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
    def test_add_special_tokens( self ) -> str:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                special_token = '''[SPECIAL_TOKEN]'''
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency( self ) -> Dict:
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(' ' , '' ) , output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
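# Behavioural note: the MGP-STR tokenizer works at the character level over the 38-symbol
# vocabulary written out in setUp ([GO], [s], digits 0-9 and lowercase a-z), so 'tester'
# is encoded as the six ids of its individual characters.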
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ) -> List[Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ) -> Any:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ) -> int:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
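    # Both cache checks above decode step-by-step through init_cache/past_key_values and
    # assert that the final-step logits agree with a single full forward pass to within
    # 1e-3.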
@require_flax
class __lowerCAmelCase ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ) -> Union[str, Any]:
        '''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ) -> str:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ) -> Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ) -> str:
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
        inputs = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
    def test_model_from_pretrained( self ) -> List[str]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 348 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler ) -> Dict:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 100 , generator = None , audio_length_in_s = None , return_dict = True , ) -> Union[AudioPipelineOutput, Tuple]:
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it\'s bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
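# Minimal usage sketch (comments only; a hypothetical checkpoint id is shown -- the class
# above mirrors diffusers' unconditional audio DanceDiffusionPipeline):
#   pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
#   audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]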
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed( self ) -> int:
'''simple docstring'''
return 12
@property
    def num_embeds_ada_norm( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
    def text_embedder_hidden_size( self ) -> List[str]:
'''simple docstring'''
return 32
@property
    def dummy_vqvae( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
    def dummy_text_encoder( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
    def dummy_transformer( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
        __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
        __lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
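    # The two tests above differ only in the classifier-free sampling embeddings: the first
    # uses a non-learnable embedding, the second a learnable one sized to the tokenizer's
    # maximum sequence length.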
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 348 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A = logging.get_logger(__name__)
__A = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class __lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''van'''
    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
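# Minimal usage sketch (comments only; assumes the class above is exported as VanConfig):
#   config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#   assert config.model_type == 'van'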
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ) -> Optional[int]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
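    # The synthetic inputs above are random pixel values, a full pixel mask, thresholded
    # random binary mask labels and random binary class labels -- enough to exercise the
    # shapes and the loss computation without real data.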
    def get_config( self ) -> Union[str, Any]:
        '''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ) -> str:
        '''simple docstring'''
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ) -> Any:
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ) -> Tuple:
        '''simple docstring'''
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ) -> Tuple:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
    def test_config( self ) -> Tuple:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_maskaformer_model( self ) -> List[str]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskaformer_instance_segmentation_head_model( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
    def test_model_from_pretrained( self ) -> int:
        '''simple docstring'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
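# The gradient-retention test above hinges on Tensor.retain_grad(), which asks
# autograd to keep .grad on intermediate (non-leaf) tensors. A minimal,
# self-contained sketch of that pattern (toy shapes, independent of Mask2Former):
def _retain_grad_sketch():
    import torch

    x = torch.randn(2, 3, requires_grad=True)
    hidden = x * 2  # non-leaf tensor: its .grad is discarded by default
    hidden.retain_grad()  # ask autograd to keep its gradient
    loss = hidden.sum()
    loss.backward(retain_graph=True)  # retain_graph permits a second backward
    assert hidden.grad is not None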
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCamelCase ( lowercase__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=3 , lowerCamelCase__=("DownEncoderBlock2D",) , lowerCamelCase__=(64,) , lowerCamelCase__=2 , lowerCamelCase__=32 , lowerCamelCase__="silu" , lowerCamelCase__=True , ) -> List[str]:
'''simple docstring'''
super().__init__()
__lowerCamelCase = layers_per_block
__lowerCamelCase = torch.nn.Convad(
_a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
# down
__lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
__lowerCamelCase = output_channel
__lowerCamelCase = block_out_channels[i]
__lowerCamelCase = i == len(_a ) - 1
__lowerCamelCase = get_down_block(
_a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , )
self.down_blocks.append(_a )
# mid
__lowerCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# out
__lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = 2 * out_channels if double_z else out_channels
__lowerCamelCase = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 )
__lowerCamelCase = False
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = x
__lowerCamelCase = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase__ ):
def custom_forward(*lowerCamelCase__ ):
return module(*_a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , use_reentrant=_a )
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , use_reentrant=_a )
else:
for down_block in self.down_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a )
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase = down_block(_a )
# middle
__lowerCamelCase = self.mid_block(_a )
# post-process
__lowerCamelCase = self.conv_norm_out(_a )
__lowerCamelCase = self.conv_act(_a )
__lowerCamelCase = self.conv_out(_a )
return sample
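# The gradient checkpointing above trades memory for recompute: checkpoint()
# re-runs the wrapped callable during backward instead of caching activations.
# A minimal standalone sketch of the create_custom_forward idiom (toy module;
# use_reentrant=False assumes torch >= 1.11, mirroring the version gate above):
def _checkpoint_sketch():
    import torch
    import torch.nn as nn
    from torch.utils.checkpoint import checkpoint

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)
        return custom_forward

    block = nn.Linear(8, 8)
    x = torch.randn(4, 8, requires_grad=True)
    out = checkpoint(create_custom_forward(block), x, use_reentrant=False)
    out.sum().backward()
    assert x.grad is not None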
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=3 , lowerCamelCase__=("UpDecoderBlock2D",) , lowerCamelCase__=(64,) , lowerCamelCase__=2 , lowerCamelCase__=32 , lowerCamelCase__="silu" , lowerCamelCase__="group" , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = layers_per_block
__lowerCamelCase = nn.Convad(
_a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = in_channels if norm_type == 'spatial' else None
# mid
__lowerCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# up
__lowerCamelCase = list(reversed(_a ) )
__lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
__lowerCamelCase = output_channel
__lowerCamelCase = reversed_block_out_channels[i]
__lowerCamelCase = i == len(_a ) - 1
__lowerCamelCase = get_up_block(
_a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , )
self.up_blocks.append(_a )
__lowerCamelCase = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase = SpatialNorm(block_out_channels[0] , _a )
else:
__lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 )
__lowerCamelCase = False
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = z
__lowerCamelCase = self.conv_in(_a )
__lowerCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase__ ):
def custom_forward(*lowerCamelCase__ ):
return module(*_a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a )
__lowerCamelCase = sample.to(_a )
# up
for up_block in self.up_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , _a , use_reentrant=_a )
else:
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a )
__lowerCamelCase = sample.to(_a )
# up
for up_block in self.up_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a )
else:
# middle
__lowerCamelCase = self.mid_block(_a , _a )
__lowerCamelCase = sample.to(_a )
# up
for up_block in self.up_blocks:
__lowerCamelCase = up_block(_a , _a )
# post-process
if latent_embeds is None:
__lowerCamelCase = self.conv_norm_out(_a )
else:
__lowerCamelCase = self.conv_norm_out(_a , _a )
__lowerCamelCase = self.conv_act(_a )
__lowerCamelCase = self.conv_out(_a )
return sample
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__="random" , lowerCamelCase__=False , lowerCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
__lowerCamelCase = n_e
__lowerCamelCase = vq_embed_dim
__lowerCamelCase = beta
__lowerCamelCase = legacy
__lowerCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase = self.used.shape[0]
__lowerCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase = self.re_embed
__lowerCamelCase = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
__lowerCamelCase = n_e
__lowerCamelCase = sane_index_shape
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = inds.shape
assert len(_a ) > 1
__lowerCamelCase = inds.reshape(ishape[0] , -1 )
__lowerCamelCase = self.used.to(_a )
__lowerCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase = match.argmax(-1 )
__lowerCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase = self.unknown_index
return new.reshape(_a )
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = inds.shape
assert len(_a ) > 1
__lowerCamelCase = inds.reshape(ishape[0] , -1 )
__lowerCamelCase = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase = 0 # simply set to zero
__lowerCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a )
return back.reshape(_a )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCamelCase = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 )
__lowerCamelCase = self.embedding(_a ).view(z.shape )
__lowerCamelCase = None
__lowerCamelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase = self.remap_to_used(_a )
__lowerCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
if self.remap is not None:
__lowerCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase = self.unmap_to_all(_a )
__lowerCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase = self.embedding(_a )
if shape is not None:
__lowerCamelCase = z_q.view(_a )
# reshape back to match original input shape
__lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
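# The quantizer's central trick is the straight-through estimator,
# z_q = z + (z_q - z).detach(): the forward pass emits the nearest codebook
# vector, while the gradient w.r.t. z is the identity, as if no quantization
# happened. A hedged standalone illustration (toy sizes, not the diffusers API):
def _straight_through_sketch():
    import torch
    import torch.nn as nn

    codebook = nn.Embedding(16, 4)  # 16 codes of dimension 4
    z = torch.randn(5, 4, requires_grad=True)
    indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
    z_q = codebook(indices)
    z_q = z + (z_q - z).detach()  # straight-through: gradient bypasses argmin
    z_q.sum().backward()
    assert torch.allclose(z.grad, torch.ones_like(z))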
class __lowerCamelCase ( lowercase__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=False ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = parameters
__lowerCamelCase = torch.chunk(_a , 2 , dim=1 )
__lowerCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
__lowerCamelCase = deterministic
__lowerCamelCase = torch.exp(0.5 * self.logvar )
__lowerCamelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowercase_ ( self , lowerCamelCase__ = None ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = randn_tensor(
self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype )
__lowerCamelCase = self.mean + self.std * sample
return x
def lowercase_ ( self , lowerCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=[1, 2, 3] ) -> List[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return self.mean
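# The kl() method above is the closed form for diagonal Gaussians against the
# standard normal: KL = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2). A quick
# numeric sanity check of that formula (the shapes are arbitrary assumptions):
def _kl_formula_sketch():
    import torch

    mean = torch.zeros(1, 2, 4, 4)
    logvar = torch.zeros(1, 2, 4, 4)  # i.e. sigma^2 = 1
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    assert torch.allclose(kl, torch.zeros(1))  # KL of N(0, I) against itself is 0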
| 355 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
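# to_dict() above follows a common serialization pattern: deep-copy the
# instance dict, then replace the nested backbone config object with its own
# dict. A generic standalone sketch of that pattern (illustrative names only,
# not the transformers API):
def _to_dict_sketch():
    import copy

    class NestedCfg:
        def __init__(self):
            self.depth = 2

        def to_dict(self):
            return dict(self.__dict__)

    class Cfg:
        model_type = 'demo'

        def __init__(self):
            self.backbone_config = NestedCfg()

        def to_dict(self):
            output = copy.deepcopy(self.__dict__)
            output['backbone_config'] = self.backbone_config.to_dict()
            output['model_type'] = self.__class__.model_type
            return output

    assert Cfg().to_dict()['backbone_config'] == {'depth': 2}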
| 348 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__A = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
__A = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
__A = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Any:
'''simple docstring'''
if return_pvalue:
__lowerCamelCase = pearsonr(lowerCamelCase__ , lowerCamelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase__ , lowerCamelCase__ )[0] )}
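# The metric above is a thin wrapper over scipy; calling scipy directly gives
# the same numbers as the docstring example (a hedged cross-check reusing the
# values documented above):
def _scipy_direct_check():
    statistic, p_value = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    assert round(float(statistic), 2) == -0.74
    return p_value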
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
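# The causal mask above is built by filling a square with -10000 and keeping
# only the strict upper triangle via triu_(1): added to attention scores, it
# lets each position attend to itself and earlier positions only. A small
# standalone check (n is arbitrary):
def _causal_mask_sketch():
    import torch

    n = 4
    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)  # zero on and below the diagonal, -10000 strictly above
    assert mask[2, 1] == 0.0  # looking backwards is allowed
    assert mask[1, 2] == -10000.0  # looking forwards is blocked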
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
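# _LazyModule above swaps this package for a proxy that imports submodules
# only on first attribute access. The same effect can be sketched with
# PEP 562's module-level __getattr__ (standalone toy; the json/dumps mapping
# is an arbitrary illustration, not the real import structure):
import importlib

_lazy_symbols = {'json': ['dumps']}

def __getattr__(name):
    for module_name, symbols in _lazy_symbols.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')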
| 357 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance from each unexplored vertex to the partial tree
    # built so far in the graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
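# A compact reference implementation of the same algorithm using heapq, for
# comparison with the hand-rolled heap above; the adjacency format
# (vertex -> list of [neighbor, weight]) matches this file, and the toy graph
# plus its expected MST are illustrative assumptions:
def prims_reference(adj, start=0):
    import heapq

    visited, mst = {start}, []
    frontier = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(frontier)
    while frontier:
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v))
        for nxt, nw in adj[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (nw, v, nxt))
    return mst

assert prims_reference(
    {0: [[1, 1], [3, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [3, 5]], 3: [[0, 3], [2, 5]]}
) == [(0, 1), (1, 2), (0, 3)]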
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
__A = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=lowerCamelCase_ )
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = tmp_path_factory.getbasetemp() / """cache"""
__lowerCamelCase = test_hf_cache_home / """datasets"""
__lowerCamelCase = test_hf_cache_home / """metrics"""
__lowerCamelCase = test_hf_cache_home / """modules"""
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(lowerCamelCase_ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(lowerCamelCase_ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(lowerCamelCase_ ) )
__lowerCamelCase = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(lowerCamelCase_ ) )
__lowerCamelCase = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowerCamelCase_ ) )
@pytest.fixture(autouse=lowerCamelCase_ , scope='session' )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCamelCase_ )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , lowerCamelCase_ )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , lowerCamelCase_ )
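# A hedged aside on the mechanism: monkeypatch.setattr is reverted by pytest
# once each test finishes, which is what lets the autouse fixtures above
# rewire global cache paths without leaking state. Illustrative only (such a
# check would live in a test module rather than in conftest):
def _monkeypatch_sketch(monkeypatch, tmp_path):
    monkeypatch.setattr(datasets.config, 'HF_DATASETS_CACHE', str(tmp_path))
    assert datasets.config.HF_DATASETS_CACHE == str(tmp_path)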
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def lowercase_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
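# The frozen dataclass above maps user column names onto the canonical
# question/context/answers schema via column_mapping. A standalone sketch of
# the same pattern (illustrative names, not the datasets API):
def _column_mapping_sketch():
    from dataclasses import dataclass, replace

    @dataclass(frozen=True)
    class QATemplate:
        question_column: str = 'question'
        context_column: str = 'context'

        @property
        def column_mapping(self):
            return {self.question_column: 'question', self.context_column: 'context'}

    custom = replace(QATemplate(), question_column='query')
    assert custom.column_mapping == {'query': 'question', 'context': 'context'}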
| 348 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase_ ( ):
"""simple docstring"""
__lowerCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' )
return image
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
__lowerCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__lowerCamelCase = dct.pop(UpperCamelCase__ )
__lowerCamelCase = val
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__lowerCamelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
__lowerCamelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__lowerCamelCase = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
__lowerCamelCase = qkv_bias
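# read_in_q_v_bias above rebuilds a fused qkv bias: the original checkpoint
# stores only q and v biases, so the k slot is filled with zeros. A standalone
# shape check of that concatenation (a per-projection size of 4 is an
# arbitrary assumption):
def _qkv_bias_sketch():
    import torch

    q_bias, v_bias = torch.ones(4), torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
    assert qkv_bias.shape == (12,) and bool(qkv_bias[4:8].eq(0).all())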
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
"""simple docstring"""
__lowerCamelCase = 364 if 'coco' in model_name else 224
__lowerCamelCase = BlipaVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__lowerCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=UpperCamelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
__lowerCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=UpperCamelCase__ ).to_dict()
elif "t5-xl" in model_name:
__lowerCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__lowerCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__lowerCamelCase = BlipaConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=False ):
"""simple docstring"""
__lowerCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__lowerCamelCase = tokenizer('\n' , add_special_tokens=UpperCamelCase__ ).input_ids[0]
__lowerCamelCase , __lowerCamelCase = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
__lowerCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__lowerCamelCase , __lowerCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__lowerCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print('Done!' )
# update state dict keys
__lowerCamelCase = original_model.state_dict()
__lowerCamelCase = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('Qformer.bert' ):
__lowerCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__lowerCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__lowerCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__lowerCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__lowerCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__lowerCamelCase = key.replace('t5' , 'language' )
__lowerCamelCase = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__lowerCamelCase = load_demo_image()
__lowerCamelCase = vis_processors['eval'](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
__lowerCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(UpperCamelCase__ )
# create processor
__lowerCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
__lowerCamelCase = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values.to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "opt" in model_name:
__lowerCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__lowerCamelCase = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
else:
__lowerCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__lowerCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__lowerCamelCase = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__lowerCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=UpperCamelCase__ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__lowerCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=UpperCamelCase__ )
else:
# cast to same type
__lowerCamelCase = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__lowerCamelCase = ''
__lowerCamelCase = tokenizer(UpperCamelCase__ , return_tensors='pt' ).input_ids.to(UpperCamelCase__ )
__lowerCamelCase = original_model.generate({'image': original_pixel_values} )
__lowerCamelCase = hf_model.generate(
UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , UpperCamelCase__ )
__lowerCamelCase = input_ids.shape[1]
__lowerCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
__lowerCamelCase = [text.strip() for text in output_text]
print('HF generation:' , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["image"]
snake_case_ = ["image"]
snake_case_ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case_ = False
@property
    def text_embedder_hidden_size( self ) -> int:
'''simple docstring'''
return 32
@property
    def time_input_dim( self ) -> int:
'''simple docstring'''
return 32
@property
    def time_embed_dim( self ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
    def renderer_dim( self ) -> int:
'''simple docstring'''
return 8
@property
    def dummy_image_encoder( self ) -> CLIPVisionModel:
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
return model
@property
    def dummy_image_processor( self ) -> CLIPImageProcessor:
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
    def dummy_prior( self ) -> PriorTransformer:
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer( self ) -> ShapERenderer:
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self ) -> dict:
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> dict:
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
    def test_shap_e_img2img( self ) -> None:
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ) -> None:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ) -> None:
        '''simple docstring'''
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ) -> None:
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img( self ) -> None:
        '''simple docstring'''
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets ( datasets : List[DatasetType] , probabilities : Optional[List[float]] = None , seed : Optional[int] = None , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets ( dsets : List[DatasetType] , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , axis : int = 0 , ) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
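# A minimal usage sketch of the map-style path (assuming the public `datasets`
# package is installed, which exposes these helpers under the same names):
#
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)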
| 348 | 0 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data : dict ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ( ) -> None:
    """simple docstring"""
    data = fetch_california_housing()
    data_input , data_output = data_handling(data )
    x_train , x_test , y_train , y_test = train_test_split(
        data_input , data_output , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(F"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys ( name : str ) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def rename_state_dict ( state_dict : OrderedDict , hidden_size : int ) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
# split fused qkv proj
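            # the fused projection is packed as a (3 * hidden_size, hidden_size) matrix with
            # query, key and value weights stacked in that order (the standard PyTorch
            # nn.MultiheadAttention in_proj layout), so it is sliced into equal thirds below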
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint ( checkpoint : str ) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
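    # note: the FFN width is always four times the hidden size here, e.g. 4096 for the
    # 1024-dim "small" checkpoint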
return config
@torch.no_grad()
def convert_musicgen_checkpoint ( checkpoint : str , pytorch_dump_folder : str=None , repo_id : str=None , device : str="cpu" ) -> None:
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
import requests
def send_slack_message ( message_body : str , slack_url : str ) -> None:
    """simple docstring"""
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''sew-d'''
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ) -> int:
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
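        # worked example, assuming the default conv_stride above: the product
        # 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320, i.e. the feature
        # encoder emits one frame per 320 raw audio samples (the same ratio as wav2vec 2.0)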
| 348 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = FlaxAutoencoderKL
@property
    def dummy_input( self ) -> dict:
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ) -> tuple:
        '''simple docstring'''
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 363 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively ( hf_pointer : Any , key : str , value : Any , full_name : str , weight_type : str ) -> None:
    """simple docstring"""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore ( name : str , ignore_keys : list ) -> bool:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
        prefix , suffix = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights ( fairseq_dict : dict , hf_model : Any , task : str ) -> None:
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('.*.' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer ( full_name : str , value : Any , feature_extractor : Any , unused_weights : list , use_group_norm : bool ) -> None:
    """simple docstring"""
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint ( task : str , checkpoint_path : str , pytorch_dump_folder_path : str , config_path : str=None , vocab_path : str=None , repo_id : str=None , ) -> None:
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['model'] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 348 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''luke'''
    def __init__( self , vocab_size=50_267 , entity_vocab_size=500_000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 364 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs ( graph : dict ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v: int , c: int ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
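# A quick negative check (an odd cycle can never be two-colored): the triangle
# below is not bipartite, so this should print False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))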
| 348 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(__a , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
def training_function ( config : dict , args : Any ) -> None:
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
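            # scale the loss so that gradients accumulated over several micro-batches
            # average out to the gradient of the full effective batch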
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , eval_metric )
        performance_metric[F"""epoch-{epoch}"""] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main ( ) -> None:
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test( self ) -> None:
        '''simple docstring'''
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
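        # the model returns the mean cross-entropy over the label tokens; multiplying by
        # the label length and negating recovers the summed log-likelihood reported by the
        # original Mesh TensorFlow implementation (hence the name `mtf_score`)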
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 | 0 |
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
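# How the quine works: %% collapses to a single % (the string-formatting operator) and
# %r re-embeds the quoted source string into itself, so the program prints its own text.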
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config ( model_name : str ) -> FocalNetConfig:
    """simple docstring"""
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , idalabel=idalabel , labelaid=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
return config
def rename_key ( name : str ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def convert_focalnet_checkpoint ( model_name : str , pytorch_dump_folder_path : str , push_to_hub : bool=False ) -> None:
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=[0.485, 0.456, 0.406],  # ImageNet default mean/std
        image_std=[0.229, 0.224, 0.225],
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])
    print('First values of logits:', outputs.logits[0, :3])

    # no reference slices are listed for the large/xlarge/huge checkpoints above
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
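
    # Example invocation (illustrative; the script filename is an assumption, and running
    # it needs network access for the original checkpoint and the COCO test image):
    #   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny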
| 348 | 0 |
"""simple docstring"""
from __future__ import annotations
__A = "Muhammad Umer Farooq"
__A = "MIT"
__A = "1.0.0"
__A = "Muhammad Umer Farooq"
__A = "[email protected]"
__A = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect absolute URLs from the href attribute of every anchor tag."""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the registered domain, e.g. "github.com" for "https://github.com/user"."""
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the page at `url` and return the e-mail addresses found for its domain."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)
try:
# Open URL
        r = requests.get(url)
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
        valid_emails = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
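
    # Offline sketch of the extraction step used in emails_from_url: run the same regex
    # over a literal HTML snippet, so no network access is needed. The snippet and the
    # addresses are made up for illustration; note the crawler does not re.escape the
    # domain, which is harmless here because "." also matches a literal dot.
    _html = 'mail us: <a href="mailto:[email protected]">alice</a> or [email protected]'
    _found = sorted(set(re.findall("[a-zA-Z0-9]+@" + "github.com", _html)))
    assert _found == ["[email protected]", "[email protected]"]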
| 367 |
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
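

# Quick sanity checks for the merge-and-pick logic above: an odd-length merge takes the
# middle element, while an even-length merge averages the two middle elements.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # merged: [1.0, 2.0, 3.0]
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # merged: [1.0, 2.0, 3.0, 4.0]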
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 348 | 0 |
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
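

# Self-contained sketch of the dispatch pattern used in main(): each subcommand parser
# attaches its handler via set_defaults(func=...), and the entry point bails out when no
# subcommand was selected. The names here are illustrative, not part of accelerate.
def _demo_dispatch(argv):
    demo_parser = ArgumentParser('demo', usage='demo <command>', allow_abbrev=False)
    subcommands = demo_parser.add_subparsers(help='demo command helpers')
    hello = subcommands.add_parser('hello')
    hello.set_defaults(func=lambda args: print('hello'))
    args = demo_parser.parse_args(argv)
    if not hasattr(args, 'func'):
        demo_parser.print_help()
        return 1
    args.func(args)
    return 0


assert _demo_dispatch(['hello']) == 0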
if __name__ == "__main__":
main()
| 368 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two of the energy units in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
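

# Worked example of the formula above (value * from_factor / to_factor): one
# kilowatthour is 3_600_000 joule, and the conversion round-trips exactly.
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0
assert energy_conversion("joule", "kilowatthour", 3_600_000.0) == 1.0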
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
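

# Sketch of the lazy-import idea behind _LazyModule using only the standard library: a
# module-level __getattr__ (PEP 562) imports a submodule the first time one of its names
# is requested. Illustrative only and unused here; the real _LazyModule also handles
# dir(), copying and error reporting.
import importlib

_LAZY_ATTRS = {"TapasConfig": ".configuration_tapas"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name], __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")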
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
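

# Hedged usage sketch (not run here): instantiating the tool downloads the checkpoint,
# so this assumes network access and that the hub checkpoint above is still hosted.
#
#     summarizer = TextSummarizationTool()
#     print(summarizer("A long English text to summarize ..."))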
| 348 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """Construct an XGLM tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__lowerCamelCase = 7
__lowerCamelCase = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
__lowerCamelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
__lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
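

# Toy demonstration of the fairseq/SentencePiece alignment implemented above: ids 0-3
# come from the hand-written fairseq table, and every SentencePiece id is shifted by
# fairseq_offset = 1, so spm id 3 (the first "real" piece) lands at fairseq id 4. The
# toy vocabulary below is an assumption for illustration only.
def _toy_token_to_id(token, spm_piece_to_id, fairseq_offset=1):
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert _toy_token_to_id(",", {",": 3}) == 4  # first real piece: spm id 3 -> fairseq id 4
assert _toy_token_to_id("<pad>", {}) == 1  # special tokens bypass SentencePiece entirely
assert _toy_token_to_id("never-seen", {}) == 3  # unknown pieces fall back to <unk>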
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 348 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ) -> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_rescale' ) )
self.assertTrue(hasattr(lowercase_ , 'do_pad' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
__lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
__lowerCamelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
__lowerCamelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# prepare image and target
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {"""image_id""": 39_769, """annotations""": target}
# encode them
__lowerCamelCase = DeformableDetrImageProcessor()
__lowerCamelCase = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
__lowerCamelCase = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
__lowerCamelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
__lowerCamelCase = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
__lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify orig_size
__lowerCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
__lowerCamelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
# prepare image, target and masks_path
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
__lowerCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCamelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
__lowerCamelCase = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
__lowerCamelCase = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
__lowerCamelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
__lowerCamelCase = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
__lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify masks
__lowerCamelCase = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_ )
# verify orig_size
__lowerCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
__lowerCamelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
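

# Standalone sketch of the shortest-edge resize rule that get_expected_values mirrors:
# scale the image so its short side equals `shortest_edge` while preserving aspect ratio.
def _shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# The 480x640 COCO image used in the slow tests above, resized with shortest_edge=800,
# indeed yields the (800, 1066) spatial shape those tests assert.
assert _shortest_edge_resize(480, 640, 800) == (800, 1_066)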
| 371 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
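

# Minimal sketch of what these placeholder classes accomplish: each class exists so that
# imports succeed, but instantiating one without the backend raises a clear ImportError.
# The helper below is an illustrative reimplementation, not the one in transformers.utils.
import importlib.util


def _requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the {missing} backend(s), which are not installed.")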
| 348 | 0 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = DownBlockaD # noqa F405
snake_case_ = '''down'''
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ResnetDownsampleBlockaD # noqa F405
snake_case_ = '''down'''
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnDownBlockaD # noqa F405
snake_case_ = '''down'''
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CrossAttnDownBlockaD # noqa F405
snake_case_ = '''down'''
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SimpleCrossAttnDownBlockaD # noqa F405
snake_case_ = '''down'''
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=_a )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SkipDownBlockaD # noqa F405
snake_case_ = '''down'''
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=_a )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnSkipDownBlockaD # noqa F405
snake_case_ = '''down'''
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=_a )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = DownEncoderBlockaD # noqa F405
snake_case_ = '''down'''
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_temb=_a )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = {
'in_channels': 32,
'out_channels': 32,
}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnDownEncoderBlockaD # noqa F405
snake_case_ = '''down'''
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return super().get_dummy_input(include_temb=_a )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = {
'in_channels': 32,
'out_channels': 32,
}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = UNetMidBlockaD # noqa F405
snake_case_ = '''mid'''
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = {
'in_channels': 32,
'temb_channels': 128,
}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = UNetMidBlockaDCrossAttn # noqa F405
snake_case_ = '''mid'''
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = UNetMidBlockaDSimpleCrossAttn # noqa F405
snake_case_ = '''mid'''
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=_a )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = UpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ResnetUpsampleBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CrossAttnUpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SimpleCrossAttnUpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = super().prepare_init_args_and_inputs_for_common()
__lowerCamelCase = 32
return init_dict, inputs_dict
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnUpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SkipUpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnSkipUpBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = UpDecoderBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return super().get_dummy_input(include_temb=_a )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = {'in_channels': 32, 'out_channels': 32}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(_a )
class __lowerCAmelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AttnUpDecoderBlockaD # noqa F405
snake_case_ = '''up'''
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_temb=_a )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = {'in_channels': 32, 'out_channels': 32}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(_a )
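

# The pattern used throughout these block tests: compare the first few flattened output
# values against a hard-coded slice with a loose tolerance. A standalone illustration:
import numpy as _np

_output_slice = _np.array([0.1234005, -0.98765, 0.5])
assert _np.allclose(_output_slice, [0.1234, -0.9877, 0.5], atol=1e-3)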
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = np.random.RandomState(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
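        # In img2img, `strength` sets how far into the noise schedule the init image is
        # pushed: roughly int(num_inference_steps * strength) denoising steps actually run,
        # so strength=0.75 with 3 steps executes only 2 of them.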
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
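    # The arena_extend_strategy above ("kSameAsRequested") stops onnxruntime's CUDA memory
    # arena from growing beyond what each allocation actually requests, which keeps the
    # pipeline inside the 15GB cap set by gpu_mem_limit.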
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 348 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: make a function return its wall-clock duration (seconds) instead of its result."""
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate `num_examples` rows of random data that match a `datasets.Features` spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the inner value type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random examples to an Arrow file and load them back as a `datasets.Dataset`."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
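# Illustrative usage (this feature spec is hypothetical, not part of the original file):
#
#     features = datasets.Features({"text": datasets.Value("string")})
#     ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)
#     assert len(ds) == 100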
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
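# The two id lists below appear to mirror Whisper's NON_SPEECH_TOKENS and
# NON_SPEECH_TOKENS_MULTI: token ids (punctuation, sound-effect markers, etc.) that are
# suppressed at generation time so the decoder sticks to transcribable speech.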
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
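        # Axis 0 is always the dynamic batch dimension; the decoder sequence axis (handled
        # below) is only dynamic when past key values are absent, since with a cache the
        # decoder consumes one token at a time.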
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
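        # Whisper's encoder front-end downsamples by 2 (a stride-2 conv), so the encoder
        # output is half the number of input feature frames; that halved length sizes the
        # dummy decoder inputs when a past-key-values cache is in play.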
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 348 | 0 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k: the number of ways to pick k items from n without regard to order."""
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
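# Example: combinations(52, 5) evaluates to 2598960, matching the five-card-hand count above.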
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
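        # Decode in two chunks: all-but-the-last token primes the cache, then the final
        # token is fed with `past_key_values`. If caching is correct, the last-position
        # logits must match a single full forward pass over the whole sequence.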
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
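                # Zero the attention mask before a random start index in each row so the
                # Flax/PyTorch equivalence check also covers padded prefixes, not just
                # fully-attended sequences.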
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 348 | 0 |
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations (no + operator)."""
    while second != 0:
        # AND extracts the carry bits, XOR adds without carrying, and the carry is
        # shifted left one position and folded back in on the next iteration.
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f'''{add(first, second) = }''')
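# Worked example: add(5, 3) -> 5 ^ 3 = 6 with carry (5 & 3) << 1 = 2; 6 ^ 2 = 4 with
# carry 4; 4 ^ 4 = 0 with carry 8; 0 ^ 8 = 8 and the carry is exhausted, so add(5, 3) == 8.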
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ )
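        # With learnable=False, classifier-free guidance falls back to embedding an empty
        # prompt at runtime; the learnable variant exercised in the next test optimizes a
        # dedicated null embedding of shape (length, hidden_size) instead.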
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 348 | 0 |
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends in the digits of `number` itself."""
    if not isinstance(number, int):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Strip matching trailing digits from the number and its square in lockstep.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
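# Example: 76 is automorphic because 76 * 76 = 5776 ends in "76"; 7 is not, since 49 ends in "9".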
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
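        # Sanity-check that gradients flow through every intermediate activation:
        # retain_grad() keeps gradients on these non-leaf tensors so we can assert they
        # are populated after one backward pass through the segmentation loss.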
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
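# This file follows transformers' lazy-import pattern: `_import_structure` maps submodule
# names to their public symbols, and `_LazyModule` (at the bottom) defers the heavy imports
# until a symbol is first touched. The T5 tokenizers are re-exported eagerly because MT5
# ships no tokenizer of its own.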
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__A = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 355 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
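    # The mapping above (transformers' `attribute_map`) lets callers read and write
    # `config.hidden_size` even though the value is stored under `hidden_dim`.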
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
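# Minimal usage sketch (illustrative, not part of the original file):
#   config = Mask2FormerConfig()  # defaults to a Swin backbone
#   config = Mask2FormerConfig.from_backbone_config(CONFIG_MAPPING['swin']())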
| 348 | 0 |
"""simple docstring"""
def base16_encode( data: bytes ) -> str:
    """
    Encodes the given bytes into base16 (uppercase hex).

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )


def base16_decode( data: str ) -> bytes:
    """
    Decodes the given base16-encoded string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
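# Quick round-trip sanity check (illustrative, not part of the original module):
#   assert base16_decode(base16_encode(b"\x00\xff")) == b"\x00\xff"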
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput ):
    """simple docstring"""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , num_attention_heads = 32 , attention_head_dim = 64 , num_layers = 20 , embedding_dim = 768 , num_embeddings=77 , additional_embeddings=4 , dropout = 0.0 , time_embed_act_fn = "silu" , norm_in_type = None , embedding_proj_norm_type = None , encoder_hid_proj_type = "linear" , added_emb_type = "prd" , time_embed_dim = None , embedding_proj_dim = None , clip_embed_dim = None , ):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn='gelu' , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        # Additive causal bias: -10000.0 above the diagonal blocks attention to future positions.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}

        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , 'set_processor' ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , child , processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , child , processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    def forward( self , hidden_states , timestep , proj_embedding , encoder_hidden_states = None , attention_mask = None , return_dict = True , ):
        '''simple docstring'''
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self , prior_latents ):
        '''simple docstring'''
        # Un-normalizes prior latents using the CLIP statistics registered in __init__.
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
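# Shape sketch (illustrative, assuming the default config): `forward` maps hidden_states of shape
# (batch, embedding_dim), a scalar or (batch,) timestep, and proj_embedding (batch, embedding_dim)
# to a predicted_image_embedding of shape (batch, clip_embed_dim).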
| 348 | 0 |
import heapq
def greedy_min_vertex_cover( graph: dict ) -> set:
    """simple docstring"""
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 357 |
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__( self ) -> None:
        '''simple docstring'''
        self.node_position = []

    def get_position( self , vertex ) -> int:
        '''simple docstring'''
        return self.node_position[vertex]

    def set_position( self , vertex , pos ) -> None:
        '''simple docstring'''
        self.node_position[vertex] = pos

    def top_to_bottom( self , heap , start , size , positions ) -> None:
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )

    def bottom_to_top( self , val , index , heap , position ) -> None:
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify( self , heap , positions ) -> None:
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self , heap , positions ):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp


def prisms_algorithm( adjacency_list ) -> list:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
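# Here `adjacency_list` maps each vertex index to [neighbor, weight] pairs, so
# prisms_algorithm returns the minimum spanning tree as (parent, child) edge tuples.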
| 348 | 0 |
from __future__ import annotations
def comp_and_swap( array: list[int] , index_1: int , index_2: int , direction: int ) -> None:
    """simple docstring"""
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1] , array[index_2] = array[index_2], array[index_1]


def bitonic_merge( array: list[int] , low: int , length: int , direction: int ) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )


def bitonic_sort( array: list[int] , low: int , length: int , direction: int ) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive(TaskTemplate ):
    """simple docstring"""

    task: str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features(
        {
            '''answers''': Sequence(
                {
                    '''text''': Value('''string''' ),
                    '''answer_start''': Value('''int32''' ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
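# Usage sketch (column names here are hypothetical, not from the original file):
#   task = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
#   dataset = dataset.prepare_for_task(task)  # casts the dataset to the schema above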
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__A = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BeitFeatureExtractor"]
__A = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
import random
def partition( a: list , left_index: int , right_index: int ) -> int:
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i] , a[j] = a[j], a[i]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random( a: list , left: int , right: int ) -> None:
    """simple docstring"""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point


def main() -> None:
    """simple docstring"""
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets: List[DatasetType] , probabilities: Optional[List[float]] = None , seed: Optional[int] = None , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets: List[DatasetType] , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , axis: int = 0 , ) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
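# Usage sketch (assuming two already-loaded datasets ds1 and ds2):
#   mixed = interleave_datasets([ds1, ds2], probabilities=[0.8, 0.2], seed=42)
#   joined = concatenate_datasets([ds1, ds2])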
| 348 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
        '''simple docstring'''
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def rename_keys( name: str ) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
def rename_state_dict( state_dict: OrderedDict , hidden_size: int ) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint: str ) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__A = ["bert-base-uncased", "bert-base-cased"]
__A = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
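    # Wraps a TF-native tokenizer and a TF model so tokenization runs inside the graph,
    # which makes the whole pipeline exportable as a single SavedModel (tested below).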
    class ModelToSave(tf.keras.Model ):
        """simple docstring"""

        def __init__( self , tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )

        def call( self , inputs ):
            '''simple docstring'''
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self ):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest' )
                tf_outputs = tf_tokenizer(test_inputs )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
    def test_different_pairing_styles( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
    def test_graph_mode( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = position_buckets
__lowerCamelCase = share_att_key
__lowerCamelCase = relative_attention
__lowerCamelCase = norm_rel_ebd
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = feature_layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 348 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
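# The first test class checks tf_top_k_top_p_filtering against hand-constructed logits for
# which the surviving (non -inf) indices and values are known in advance.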
@require_tf
class UtilsFunctionsTest(unittest.TestCase ):
"""simple docstring"""
    def test_top_k_top_p_filtering( self ):
'''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.float32 , ) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('inf' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
    """simple docstring"""

    if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        '''simple docstring'''
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            """simple docstring"""

            def __init__( self , model ):
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" argument: generate() must
        # filter "foo" out of the encoder kwargs, so the output is unchanged.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 363 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
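
# Matching semantics of `should_ignore`, by example (the key names below are
# illustrative only; they mirror the patterns used in the IGNORE_KEYS lists above):
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                          -> True  (plain substring)
#   should_ignore("text_encoder_prenet.embed.weight", ["text_encoder_prenet.*"])    -> True  (prefix wildcard)
#   should_ignore("decoder.layers.0.norm_k.bias", ["decoder.layers.*.norm_k.bias"]) -> True  (infix wildcard)
#   should_ignore("decoder.pos_emb.pe_v", ["decoder.pos_emb.pe_k"])                 -> False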
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
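
# Example of how a single fairseq key is routed by `load_conv_layer` (the key below
# is a hypothetical illustration in the fairseq naming scheme):
#   full_name = "speech_encoder_prenet.feature_extractor.conv_layers.0.0.weight"
#   -> name = "0.0.weight", layer_id = 0, type_id = 0
#   -> copied into feature_extractor.conv_layers[0].conv.weight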
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
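
# Example invocation (a sketch: it assumes this file is saved as
# convert_speecht5_original_pytorch_checkpoint_to_pytorch.py, and the checkpoint and
# SentencePiece paths below are placeholders for your local fairseq assets):
#
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted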
| 348 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
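
# Minimal usage sketch (illustrative values): `feature_size` — the width of the
# vectors fed to the encoder/decoder — is derived from the lags and extra features.
#
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   assert config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features
#   # with the defaults above: 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9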
| 364 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to one from V.
# Equivalently, no edge connects two vertices of the same set, i.e. the graph is
# 2-colorable. The check below 2-colors the graph with a depth-first search and then
# verifies that no edge has both endpoints in the same color class.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
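
# An odd cycle is the classic non-bipartite case: the triangle below cannot be
# 2-colored, so the check reports False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False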
| 348 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of the content field (whitespace-insensitive)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate the mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file contains none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
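
# Quick illustrative check of two of the heuristics above on an in-memory example
# (a demo sketch, not part of the original pipeline; gated behind an env var so the
# script's normal CLI behavior is unchanged).
if os.environ.get("RUN_HEURISTIC_DEMO"):
    _demo = {"content": "# this file was automatically generated\nx = 1\n"}
    assert is_autogenerated(_demo) == {"autogenerated": True}
    assert has_no_keywords(_demo) == {"has_no_keywords": True}  # no def/class/for/while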
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """
        Checks the tiny-model loss against a reference value computed with the
        original mesh-tensorflow implementation.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 348 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model, which expects a `next_sentence_label`
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
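
# Example invocation (a sketch: it assumes this file is saved as
# convert_focalnet_to_hf_format.py; the output directory is a placeholder):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted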
| 348 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__A = logging.getLogger()
__A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ ) -> int:
'''simple docstring'''
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__lowerCamelCase = {'source': 'What is love ?', 'target': 'life'}
__lowerCamelCase = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__lowerCamelCase = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowerCamelCase , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(__lowerCamelCase )
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
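
# Local-run sketch (added for illustration; not part of the original test module):
# the suite can also be launched directly, with the resource decorators still
# gating which tests actually execute on the current machine.
if __name__ == "__main__":
    import unittest

    unittest.main()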
| 367 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted values of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
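

# Worked examples (added for illustration): an odd-length pool returns the middle
# element; an even-length pool averages the two middle values.
def _median_examples() -> None:
    assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
    assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5
    assert median_of_two_arrays([], [5.0]) == 5.0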
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 348 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines."""

    images: Union[List[PIL.Image.Image], List[np.ndarray]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating latent representations of a 3D asset from an image, using Shap-E."""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=_lowerCamelCase , image_encoder=_lowerCamelCase , image_processor=_lowerCamelCase , scheduler=_lowerCamelCase , renderer=_lowerCamelCase , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
__lowerCamelCase = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__lowerCamelCase = latents.to(_lowerCamelCase )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowercase_ ( self , lowerCamelCase__=0 ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCamelCase = torch.device(f"""cuda:{gpu_id}""" )
__lowerCamelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Dict:
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , torch.Tensor ):
__lowerCamelCase = torch.cat(_lowerCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCamelCase , axis=0 )
if not isinstance(_lowerCamelCase , torch.Tensor ):
__lowerCamelCase = self.image_processor(_lowerCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
__lowerCamelCase = image.to(dtype=self.image_encoder.dtype , device=_lowerCamelCase )
__lowerCamelCase = self.image_encoder(_lowerCamelCase )['last_hidden_state']
__lowerCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__lowerCamelCase = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__lowerCamelCase = torch.zeros_like(_lowerCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = 25 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 4.0 , lowerCamelCase__ = 64 , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , ) -> Any:
'''simple docstring'''
if isinstance(_lowerCamelCase , PIL.Image.Image ):
__lowerCamelCase = 1
elif isinstance(_lowerCamelCase , torch.Tensor ):
__lowerCamelCase = image.shape[0]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__lowerCamelCase = len(_lowerCamelCase )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCamelCase )}""" )
__lowerCamelCase = self._execution_device
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_image(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# prior
self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.prior.config.num_embeddings
__lowerCamelCase = self.prior.config.embedding_dim
__lowerCamelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__lowerCamelCase = latents.reshape(latents.shape[0] , _lowerCamelCase , _lowerCamelCase )
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase = self.prior(
_lowerCamelCase , timestep=_lowerCamelCase , proj_embedding=_lowerCamelCase , ).predicted_image_embedding
# remove the variance
__lowerCamelCase , __lowerCamelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__lowerCamelCase = self.scheduler.step(
_lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCamelCase )
__lowerCamelCase = []
for i, latent in enumerate(_lowerCamelCase ):
__lowerCamelCase = self.renderer.decode(
latent[None, :] , _lowerCamelCase , size=_lowerCamelCase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCamelCase )
__lowerCamelCase = torch.stack(_lowerCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
__lowerCamelCase = images.cpu().numpy()
if output_type == "pil":
__lowerCamelCase = [self.numpy_to_pil(_lowerCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCamelCase )
| 368 |
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units, using joules as the pivot unit."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
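

# Worked examples (added for illustration): every conversion pivots through joules,
# so converting a unit to joules is just that unit's table entry.
def _energy_examples() -> None:
    assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0
    assert energy_conversion("joule", "kilojoule", 2_000.0) == 2.0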
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word was previously inserted."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the trie, pruning branches that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
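

# Usage sketch (added for illustration): deleting a word prunes only the branch
# that no other stored word still needs.
def _trie_example() -> None:
    trie = TrieNode()
    trie.insert_many(["car", "card", "care"])
    trie.delete("card")
    assert not trie.find("card")
    assert trie.find("car") and trie.find("care")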
if __name__ == "__main__":
main()
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    """Summarizes an English text using a BART model fine-tuned on SAMSum."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
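

# Usage sketch (added for illustration): PipelineTool instances are callable and
# chain encode -> forward -> decode; the checkpoint is downloaded on first use.
def _summarizer_example() -> None:
    tool = TextSummarizationTool()
    print(tool("A very long English article that should be compressed into a short summary."))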
| 348 | 0 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if the sentence contains every letter of the English alphabet."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different implementations."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
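

# Consistency sketch (added for illustration): the three variants should agree on
# any ASCII input, pangram or not.
def _pangram_agreement_example() -> None:
    for text in ("The quick brown fox jumps over the lazy dog", "hello world"):
        assert is_pangram(text) == is_pangram_faster(text) == is_pangram_fastest(text)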
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
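

# Usage sketch (added for illustration; requires flax and assumes the checkpoint
# above is reachable): loading the PyTorch weights into the Flax MLM head and
# inspecting the (batch, seq_len, vocab) logits. The token ids are arbitrary.
def _flax_mlm_example() -> None:
    model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
    logits = model(np.array([[0, 31_414, 232, 2]], dtype=np.int32))[0]
    print(logits.shape)  # expected: (1, 4, 50_265)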
| 348 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
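

# Illustration (added): the check above rescales the per-token mean cross-entropy
# back to a sequence-level log-likelihood, matching how the reference (MTF) score
# was computed; the helper below just makes that relationship explicit.
def _sequence_score(mean_loss: float, target_length: int) -> float:
    # the model's loss is averaged over target tokens, so multiply back by the length
    return -(target_length * mean_loss)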
| 371 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000)
        )

        torch.manual_seed(0)
        prior = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1)

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000)
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00_085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
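

# Usage sketch (added for illustration; needs a CUDA GPU): the fp16 +
# sequential-offload path exercised by the slow tests above, reduced to a script.
def _stable_unclip_example() -> None:
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    image = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2).images[0]
    image.save("anime_turtle.png")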
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
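

# Usage sketch (added for illustration): the CPU-provider fast path above, reduced
# to a standalone function; the init image is a stand-in tensor, not a real photo.
def _onnx_img2img_example() -> None:
    pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    image = floats_tensor((1, 3, 128, 128), rng=random.Random(0))
    output = pipe(
        prompt="A painting of a squirrel eating a burger",
        image=image,
        num_inference_steps=2,
        generator=np.random.RandomState(0),
        output_type="numpy",
    )
    print(output.images.shape)  # expected: (1, 128, 128, 3)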
| 348 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`Enum` holding the verification modes."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
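

# Round-trip sketch (added for illustration; the file path is hypothetical):
# verifying a freshly recorded checksum mapping against itself must always pass.
def _checksum_roundtrip(path: str = "some_local_file.bin") -> None:
    recorded = {path: get_size_checksum_dict(path)}
    verify_checksums(recorded, recorded, verification_name="roundtrip")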
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework=None, sampling_rate: int = 22_050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
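# A minimal, self-contained sketch of the dynamic-axes logic in the ONNX config
# above: with `use_past`, the decoder consumes one token at a time, so only the
# batch axis of the decoder input stays dynamic; without it, the decoder
# sequence axis is dynamic too. Plain dicts stand in for the config class, and
# the `decoder_input_ids` key name is an assumption recovered from the usual
# seq2seq ONNX export convention (the obfuscated code above only shows the values).
from collections import OrderedDict

def whisper_onnx_dynamic_axes(use_past: bool) -> OrderedDict:
    common_inputs = OrderedDict(
        [('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'})]
    )
    if use_past:
        common_inputs['decoder_input_ids'] = {0: 'batch'}
    else:
        common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
    return common_inputs

print(whisper_onnx_dynamic_axes(use_past=False))
print(whisper_onnx_dynamic_axes(use_past=True))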
| 348 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    """Build the right TAPAS head for `task`, load the TF weights, and save a PyTorch checkpoint."""
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
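# Example invocation (a sketch: the script filename and all paths below are
# hypothetical placeholders):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/tapas/model.ckpt \
#       --tapas_config_file /path/to/tapas/bert_config.json \
#       --pytorch_dump_path /path/to/output
#
# Note that the tokenizer vocab is resolved as `tf_checkpoint_path[:-10] + "vocab.txt"`,
# so the checkpoint path is expected to end in the 10-character suffix `model.ckpt`.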
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
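# Every cross-framework check in the tests above reduces to the same pattern:
# compare the last-token logits from Flax and PyTorch under a max-absolute-
# difference tolerance. A self-contained numpy restatement of that check
# (the arrays are stand-ins, not real model outputs):
import numpy as np

def max_abs_diff_check(a, b, tol):
    diff = np.max(np.abs(a - b))
    assert diff < tol, f"Max diff is {diff}"

fx_logits = np.array([[0.10, -0.20, 0.30]])
pt_logits = np.array([[0.11, -0.19, 0.29]])
max_abs_diff_check(fx_logits[:, -1], pt_logits[:, -1], tol=4e-2)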
| 348 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Return the vertices reachable from `vert`, ordered by DFS finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS for finishing order, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
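# Usage on the two module-level test graphs defined above. Each inner list is
# one strongly connected component; with this DFS order the outputs are:
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]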
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__A = False
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 12
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__lowerCamelCase = TransformeraDModel(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCamelCase__ )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , transformer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'teddy bear playing in the pool'
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='np' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=lowerCamelCase__ , output_type='np' , return_dict=lowerCamelCase__ , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
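# The assertions above all follow one pattern: slice a fixed 3x3 corner of the
# generated image and compare it to hard-coded reference values under a
# max-absolute-difference bound. A self-contained numpy restatement (the image
# is a random stand-in and the "expected" values are copied from it, so the
# check passes by construction):
import numpy as np

image = np.random.rand(1, 24, 24, 3).astype(np.float32)
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = image_slice.flatten().copy()
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2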
| 348 | 0 |
import argparse
import os
import re
__A = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__A = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
__A = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite=False):
    """Sort the OrderedDict mappings in one auto-module file; returns True when the file would change."""
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(R'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    """Check (or fix in place) the mapping order in every file of the auto module."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            ' this.' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__A = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
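# Typical invocations (a sketch; the script path is an assumption about where
# this file lives in the repository, not something stated above):
#
#   python utils/sort_auto_mappings.py --check_only   # raise if any mapping is unsorted
#   python utils/sort_auto_mappings.py                # rewrite the auto-module files in place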
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
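# The tester above feeds binary `mask_labels` of shape (batch, num_labels, H, W)
# and integer `class_labels` of shape (batch, num_labels). A self-contained
# sketch of that input format with random stand-in tensors (the sizes mirror
# the tester's defaults, not a real dataset):
import torch

batch_size, num_labels, H, W = 2, 10, 32 * 8, 32 * 8
pixel_values = torch.randn(batch_size, 3, H, W)
pixel_mask = torch.ones(batch_size, H, W)
mask_labels = (torch.rand(batch_size, num_labels, H, W) > 0.5).float()
class_labels = (torch.rand(batch_size, num_labels) > 0.5).long()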
| 348 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformeraDModel(nn.Module):
    """Two parallel TransformeraDModel blocks whose outputs are blended with `mix_ratio`."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
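# The core of `forward` above is a residual blend of the two transformers'
# outputs. A self-contained numeric sketch of just that blending step, with
# random tensors standing in for the two transformer branches:
import torch

mix_ratio = 0.5
input_states = torch.randn(1, 4, 8)                         # hidden_states entering the block
branch_outputs = [torch.randn(1, 4, 8) for _ in range(2)]   # stand-ins for the two transformers
encoded_states = [out - input_states for out in branch_outputs]  # per-branch residuals, as in forward()
output_states = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio) + input_states
print(output_states.shape)  # torch.Size([1, 4, 8])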
| 355 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
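# A self-contained sketch of the dict-dispatch pattern used above for
# `backbone_config`: pop the `model_type` key, look the type up in a mapping,
# and build the nested config from what remains. The toy mapping and config
# class below are illustrations, not the real CONFIG_MAPPING.
class ToyConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)

TOY_CONFIG_MAPPING = {"swin": ToyConfig}

backbone_config = {"model_type": "swin", "window_size": 7}
backbone_model_type = backbone_config.pop("model_type")
config = TOY_CONFIG_MAPPING[backbone_model_type].from_dict(backbone_config)
print(config.window_size)  # 7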
| 348 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case_ = '''mobilenet_v2'''
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=224 , lowerCamelCase__=1.0 , lowerCamelCase__=8 , lowerCamelCase__=8 , lowerCamelCase__=6 , lowerCamelCase__=32 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu6" , lowerCamelCase__=True , lowerCamelCase__=0.8 , lowerCamelCase__=0.02 , lowerCamelCase__=0.0_01 , lowerCamelCase__=255 , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = depth_multiplier
__lowerCamelCase = depth_divisible_by
__lowerCamelCase = min_depth
__lowerCamelCase = expand_ratio
__lowerCamelCase = output_stride
__lowerCamelCase = first_layer_is_expansion
__lowerCamelCase = finegrained_output
__lowerCamelCase = hidden_act
__lowerCamelCase = tf_padding
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = semantic_loss_ignore_index
class __lowerCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case_ = version.parse('''1.11''' )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-4
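# The config above rejects `depth_multiplier <= 0` because channel counts scale
# multiplicatively with it, then get rounded to a multiple of
# `depth_divisible_by` with a floor of `min_depth`. The rounding helper below
# mirrors how MobileNet-style models typically derive those divisible channel
# counts; it is an illustration under that assumption, not code from this file.
def make_divisible(value: float, divisor: int = 8, min_value: int = 8) -> int:
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # make sure rounding down does not shrink the channel count by more than 10%
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

depth_multiplier = 1.4
print(make_divisible(32 * depth_multiplier))  # 48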
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
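        # assemble the transformer input around `hidden_states`: optional encoder states first, then the projected embedding, the time embedding, and (if configured) a learned "prd" token at the end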
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
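# A standalone sketch of the causal attention mask registered in `__init__`
# above: positions strictly above the diagonal receive a large negative bias
# (-10000.0) so the softmax gives future tokens ~zero attention weight. The
# sequence length of 4 is illustrative only.
if __name__ == "__main__":
    import torch

    causal = torch.full([4, 4], -10_000.0)
    causal.triu_(1)  # -10000.0 strictly above the diagonal, 0.0 elsewhere
    print(causal)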
| 348 | 0 |
def longest_common_subsequence ( x : str , y : str ) -> List[str]:
"""simple docstring"""
assert x is not None
assert y is not None
    m = len(x )
    n = len(y )
# declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
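    # walk the table back from (m, n) to reconstruct one longest common subsequence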
    seq = ''
    i , j = m, n
while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
                seq = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__A = "AGGTAB"
__A = "GXTXAYB"
__A = 4
__A = "GTAB"
__A = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 357 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
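        # sift the value at index `start` down until the min-heap property holds for this subtree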
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
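# A self-contained sketch of the same algorithm using the standard library's
# heapq (lazy-deletion Prim) instead of the hand-rolled heap above; the tiny
# triangle graph and the expected tree edges are illustrative only.
import heapq

def _prim_demo(adjacency, start=0):
    visited = {start}
    frontier = [(weight, start, nbr) for nbr, weight in adjacency[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, w in adjacency[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (w, v, nbr))
    return tree_edges

_demo_adjacency = {0: [(1, 1), (2, 3)], 1: [(0, 1), (2, 1)], 2: [(0, 3), (1, 1)]}
assert _prim_demo(_demo_adjacency) == [(0, 1), (1, 2)]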
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __lowerCAmelCase ( TaskTemplate ):
"""simple docstring"""
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def lowercase_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
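# Hedged usage sketch: the `column_mapping` property above drives column
# renaming in `Dataset.prepare_for_task`. The public name this class carries
# in `datasets.tasks` (`QuestionAnsweringExtractive`) and the constructor
# keyword names are assumptions, not shown in this file:
#
# from datasets.tasks import QuestionAnsweringExtractive
# template = QuestionAnsweringExtractive(
#     question_column="q", context_column="ctx", answers_column="ans"
# )
# template.column_mapping  # {"q": "question", "ctx": "context", "ans": "answers"}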
| 348 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'prophetnet.tokenizer'}
__A = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
__A = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
__A = {
'microsoft/xprophetnet-large-wiki100-cased': 5_12,
}
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = collections.OrderedDict()
    with open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as reader:
__lowerCamelCase = reader.readlines()
    for index, token in enumerate(UpperCamelCase__ ):
__lowerCamelCase = token.rstrip('\n' )
__lowerCamelCase = index
return vocab
class __lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__="[SEP]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[UNK]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
__lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__lowerCamelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
__lowerCamelCase = f"""[unused{i}]"""
__lowerCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__lowerCamelCase = 12
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCamelCase__ )
def __getstate__( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
        __lowerCamelCase = ''.join(lowerCamelCase__ ).replace('▁' , ' ' ).strip()
return out_string
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , 'wb' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
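# Sketch of the id arithmetic above: ids 0-14 of the fairseq-style vocab are
# taken by the 5 special tokens plus 10 "[unused]" slots, so a sentencepiece
# piece id p maps to p + 12 (the first real piece sits at spm id 3 and must
# land at embedding id 15); spm id 0 is <unk> and maps to [UNK] instead.
_FAIRSEQ_OFFSET = 12

def _to_embedding_id(spm_id, unk_id=3):
    return spm_id + _FAIRSEQ_OFFSET if spm_id else unk_id

assert _to_embedding_id(3) == 15  # first real token ","
assert _to_embedding_id(0) == 3   # <unk> -> [UNK]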
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news ( bbc_news_api_key : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__A = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__A = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__A = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds : int , labels : Tuple ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def acc_and_fa ( preds : Optional[Any] , labels : int ) -> Optional[int]:
"""simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def pearson_and_spearman ( preds : Any , labels : Union[str, Any] ) -> Any:
"""simple docstring"""
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def lowercase_ ( self , predictions , references ) -> Any:
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A = logging.get_logger(__name__)
__A = TypeVar("DatasetType", Dataset, IterableDataset)
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" )
if i == 0:
__lowerCamelCase , __lowerCamelCase = (
(Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
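# Hedged usage sketch (not executed here; it assumes the public package entry
# points `datasets.interleave_datasets` / `datasets.concatenate_datasets`,
# which wrap the two helpers above):
#
# from datasets import Dataset, concatenate_datasets, interleave_datasets
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# joined = concatenate_datasets([d1, d2])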
| 348 | 0 |
"""simple docstring"""
def partition ( m : int ) -> int:
"""simple docstring"""
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
for i in range(m + 1 ):
        memo[i][0] = 1
for n in range(m + 1 ):
        for k in range(1 , m ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( OwlViTImageProcessor ):
"""simple docstring"""
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.' , FutureWarning , )
        super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , lowerCamelCase__=32 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__=2 , lowerCamelCase__=512 , lowerCamelCase__=256 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=128 , lowerCamelCase__=16 , lowerCamelCase__=True , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = position_buckets
__lowerCamelCase = share_att_key
__lowerCamelCase = relative_attention
__lowerCamelCase = norm_rel_ebd
__lowerCamelCase = list(lowerCamelCase__ )
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = feature_layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
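# Hedged usage sketch (not executed here; it assumes the public class name
# `SEWDConfig` and that the property above is exposed as
# `inputs_to_logits_ratio`, the product of the conv strides):
#
# from transformers import SEWDConfig
# cfg = SEWDConfig()
# cfg.inputs_to_logits_ratio  # 320 == 5 * 2**6 with the default conv_stride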
| 348 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str=False ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = OmegaConf.load(lowercase__ )
if display:
print(yaml.dump(OmegaConf.to_container(lowercase__ ) ) )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=None ) -> Dict:
"""simple docstring"""
if conf_path is None:
__lowerCamelCase = './model_checkpoints/vqgan_only.yaml'
__lowerCamelCase = load_config(lowercase__ , display=lowercase__ )
__lowerCamelCase = VQModel(**config.model.params )
if ckpt_path is None:
__lowerCamelCase = './model_checkpoints/vqgan_only.pt'
__lowerCamelCase = torch.load(lowercase__ , map_location=lowercase__ )
if ".ckpt" in ckpt_path:
__lowerCamelCase = sd['state_dict']
model.load_state_dict(lowercase__ , strict=lowercase__ )
model.to(lowercase__ )
del sd
return model
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = model.encode(lowercase__ )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
__lowerCamelCase = model.decode(lowercase__ )
return xrec
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = string.rsplit('.' , 1 )
if reload:
__lowerCamelCase = importlib.import_module(lowercase__ )
importlib.reload(lowercase__ )
return getattr(importlib.import_module(lowercase__ , package=lowercase__ ) , cls )
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Any:
"""simple docstring"""
if "target" not in config:
raise KeyError('Expected key `target` to instantiate.' )
return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=True ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = instantiate_from_config(lowercase__ )
if sd is not None:
model.load_state_dict(lowercase__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
if ckpt:
__lowerCamelCase = torch.load(lowercase__ , map_location='cpu' )
__lowerCamelCase = pl_sd['global_step']
print(F"""loaded model from global step {global_step}.""" )
else:
__lowerCamelCase = {'state_dict': None}
__lowerCamelCase = None
__lowerCamelCase = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=lowercase__ , eval_mode=lowercase__ )['model']
return model, global_step
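# Minimal runnable sketch of the `target`-string pattern used by
# `instantiate_from_config` above; a standard-library class stands in for a
# model class (`importlib` is already imported at the top of this file).
def _instantiate_demo(config):
    module, cls = config["target"].rsplit(".", 1)
    obj_cls = getattr(importlib.import_module(module), cls)
    return obj_cls(**config.get("params", {}))

assert type(_instantiate_demo({"target": "collections.Counter"})).__name__ == "Counter"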
| 363 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
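                # substitute the layer index recovered from the weight name for the "*" in the mapped key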
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
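# Example invocation (illustrative file names; the script path is an assumption):
#   python convert_speecht5_checkpoint.py --task t2s \
#     --checkpoint_path ./speecht5_tts.pt \
#     --vocab_path ./spm_char.model \
#     --pytorch_dump_folder_path ./speecht5_tts_hf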
| 348 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__A = logging.get_logger(__name__)
__A = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , _SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
__lowerCamelCase = self.model.config
else:
__lowerCamelCase = config
__lowerCamelCase = data_args
__lowerCamelCase = self.config.tgt_vocab_size if isinstance(self.config , _SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ' padding.' )
if self.args.label_smoothing == 0:
__lowerCamelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__lowerCamelCase = label_smoothed_nll_loss
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
if self.optimizer is None:
__lowerCamelCase = ["bias", "LayerNorm.weight"]
__lowerCamelCase = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
__lowerCamelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__lowerCamelCase = Adafactor
__lowerCamelCase = {"scale_parameter": False, "relative_step": False}
else:
__lowerCamelCase = AdamW
__lowerCamelCase = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
__lowerCamelCase = self.args.learning_rate
if self.sharded_ddp:
__lowerCamelCase = OSS(
params=_SCREAMING_SNAKE_CASE , optim=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__lowerCamelCase = optimizer_cls(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__lowerCamelCase = self._get_lr_scheduler(_SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__lowerCamelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__lowerCamelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__lowerCamelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE )
return scheduler
def lowercase_ ( self ) -> Optional[torch.utils.data.Sampler]:
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__lowerCamelCase = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
__lowerCamelCase = model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__lowerCamelCase = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase = torch.nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
__lowerCamelCase = self.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = inputs.pop('labels' )
__lowerCamelCase = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return loss
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
'''simple docstring'''
__lowerCamelCase = self._prepare_inputs(_SCREAMING_SNAKE_CASE )
__lowerCamelCase = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__lowerCamelCase = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **_SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__lowerCamelCase = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] )
__lowerCamelCase = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
__lowerCamelCase = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCamelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__lowerCamelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__lowerCamelCase = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
f""" padded to `max_length`={max_length}""" )
__lowerCamelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__lowerCamelCase = tensor
return padded_tensor
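# Minimal usage sketch (illustrative; the dataset and argument objects come from
# the surrounding training script and are assumptions here):
#   trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args,
#                            train_dataset=train_ds, eval_dataset=eval_ds)
#   trainer.train()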
| 364 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to V or a
# vertex from V to U. In other words, for every edge (u, v), either u belongs to U
# and v to V, or u belongs to V and v to U; no edge connects vertices of the same set.
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = [False] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ : int , UpperCamelCase__ : int ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__ , 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__ , 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__A = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
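# A quick illustrative counter-check: a triangle (odd cycle) cannot be
# two-colored, so the same check reports it as non-bipartite.
__A = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(graph)) # False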
| 348 | 0 |
import math
import unittest
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
assert isinstance(_a , _a ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers, and multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
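# Illustrative trace: is_prime(25) passes the small-number branches, then the
# 6k +/- 1 loop starts at i = 5 and 25 % 5 == 0, so 25 is correctly rejected.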
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
is_prime(-19 )
self.assertFalse(
            is_prime(0 ) , 'Zero doesn\'t have any positive factors; primes must have exactly two.' , )
self.assertFalse(
            is_prime(1 ) , 'One has only one positive factor; primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowerCamelCase = model(input_ids.to(lowerCamelCase__ ) , labels=labels.to(lowerCamelCase__ ) ).loss
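        # `loss` is the mean negative log-likelihood per target token; scaling it by
        # the target length and negating recovers the total log-likelihood score.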
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 | 0 |
from __future__ import annotations
import math
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers, and multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> list[int]:
"""simple docstring"""
__lowerCamelCase = str(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase = [n]
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
if len(str(SCREAMING_SNAKE_CASE_ ) ) > 3:
if not is_prime(int(str(SCREAMING_SNAKE_CASE_ )[-3:] ) ) or not is_prime(int(str(SCREAMING_SNAKE_CASE_ )[:3] ) ):
return False
return True
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] = 11 ) -> list[int]:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = 13
while len(SCREAMING_SNAKE_CASE_ ) != count:
if validate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase = list_truncated_nums(SCREAMING_SNAKE_CASE_ )
if all(is_prime(SCREAMING_SNAKE_CASE_ ) for i in list_nums ):
list_truncated_primes.append(SCREAMING_SNAKE_CASE_ )
num += 2
return list_truncated_primes
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
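# Illustrative check (3797 is a known two-sided truncatable prime):
#   list_truncated_nums(3797) -> [3797, 797, 379, 97, 37, 7, 3], all prime.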
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
__lowerCamelCase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
__lowerCamelCase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
__lowerCamelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCamelCase = [4, 4, 4, 4]
__lowerCamelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCamelCase = [3, 3, 3, 3]
else:
__lowerCamelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCamelCase = 96
elif "small" in model_name:
__lowerCamelCase = 96
elif "base" in model_name:
__lowerCamelCase = 128
elif "large" in model_name:
__lowerCamelCase = 192
elif "xlarge" in model_name:
__lowerCamelCase = 256
elif "huge" in model_name:
__lowerCamelCase = 352
# set label information
__lowerCamelCase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCamelCase = 'imagenet-22k-id2label.json'
else:
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
__lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
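# Example invocation (illustrative; the script file name is an assumption):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#     --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub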
| 348 | 0 |
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCamelCase_ ( *UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as fh:
fcntl.flock(SCREAMING_SNAKE_CASE__ , fcntl.LOCK_EX )
try:
print(*SCREAMING_SNAKE_CASE__ )
finally:
fcntl.flock(SCREAMING_SNAKE_CASE__ , fcntl.LOCK_UN )
__A = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__A = torch.device("cuda", local_rank)
__A = socket.gethostname()
__A = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__A = dist.get_rank()
__A = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
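# Typical launch for this diagnostic (illustrative GPU count and file name):
#   torchrun --nproc_per_node=2 torch-distributed-gpu-test.py
# Each rank prints its status under an exclusive file lock so output lines from
# different processes do not interleave.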
| 367 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float] , UpperCamelCase__ : list[float] ) -> float:
"""simple docstring"""
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase , __lowerCamelCase = divmod(len(UpperCamelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 348 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
"""simple docstring"""
snake_case_ = """mvp"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase__=50_267 , lowerCamelCase__=1_024 , lowerCamelCase__=12 , lowerCamelCase__=4_096 , lowerCamelCase__=16 , lowerCamelCase__=12 , lowerCamelCase__=4_096 , lowerCamelCase__=16 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__="gelu" , lowerCamelCase__=1_024 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=False , lowerCamelCase__=100 , lowerCamelCase__=800 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = use_prompt
__lowerCamelCase = prompt_length
__lowerCamelCase = prompt_mid_dim
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __lowerCamelCase ):
__lowerCamelCase = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'The config can simply be saved and uploaded again to be fixed.' )
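# Illustrative construction, assuming the upstream `MvpConfig` class name and
# keyword signature (the defaults above map to use_prompt=False, prompt_length=100,
# prompt_mid_dim=800):
#   config = MvpConfig(use_prompt=True)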
| 368 |
__A = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.3_5_5_8_1_8,
}
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> float:
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowerCamelCase = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(UpperCamelCase__ )}"""
)
raise ValueError(UpperCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
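# Worked examples against the table above (the function name is obfuscated here;
# `energy_conversion` is assumed):
#   energy_conversion("kilowatthour", "joule", 1) -> 1 * 3,600,000 / 1.0 = 3,600,000.0
#   energy_conversion("joule", "kilojoule", 500)  -> 500 * 1.0 / 1,000 = 0.5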
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__A = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__A = json.load(f)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(lowercase_ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = f"""facebook/wmt19-{pair}"""
__lowerCamelCase = self.get_tokenizer(lowercase_ )
__lowerCamelCase = self.get_model(lowercase_ )
__lowerCamelCase = bleu_data[pair]['src']
__lowerCamelCase = bleu_data[pair]['tgt']
__lowerCamelCase = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__lowerCamelCase = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
__lowerCamelCase = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
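# These BLEU regressions are marked @slow; in the Transformers test suite such
# tests only run when RUN_SLOW=1 is set, e.g. (illustrative path):
#   RUN_SLOW=1 pytest examples/seq2seq/test_fsmt_bleu_score.py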
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''philschmid/bart-large-cnn-samsum'''
snake_case_ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case_ = '''summarizer'''
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = ['''text''']
snake_case_ = ['''text''']
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ , return_tensors='pt' , truncation=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self.model.generate(**lowerCamelCase__ )[0]
def lowercase_ ( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
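# Illustrative use (agents tools are callable; the class name is obfuscated above
# and `TextSummarizationTool` is an assumption):
#   tool = TextSummarizationTool()
#   print(tool("Several paragraphs of English text to condense ..."))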
| 348 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
__A = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Any:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> int:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_lowerCamelCase )
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = tmp_path_factory.getbasetemp() / "cache"
__lowerCamelCase = test_hf_cache_home / "datasets"
__lowerCamelCase = test_hf_cache_home / "metrics"
__lowerCamelCase = test_hf_cache_home / "modules"
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowerCamelCase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowerCamelCase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowerCamelCase ) )
__lowerCamelCase = test_hf_datasets_cache / "downloads"
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowerCamelCase ) )
__lowerCamelCase = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowerCamelCase ) )
@pytest.fixture(autouse=_lowerCamelCase , scope='session' )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowerCamelCase )
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowerCamelCase )
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 348 | 0 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
snake_case_ = ['''torch''', '''scipy''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
| 371 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = MobileBertConfig.from_json_file(lowerCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = MobileBertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
__lowerCamelCase = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
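# Example invocation (illustrative paths; the script file name is an assumption):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin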
| 350 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = np.random.RandomState(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# warmup pass to apply optimizations
__lowerCamelCase = pipe(**self.get_dummy_inputs() )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
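    # Illustrative sketch (not collected by unittest; `_example_scheduler_swap` is an
    # addition, not part of the original suite): every check above follows the same
    # recipe -- load the tiny ONNX img2img checkpoint on CPU, optionally rebuild a
    # different scheduler from the current scheduler's config, then inspect the output.
    def _example_scheduler_swap(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline', provider='CPUExecutionProvider' )
        # any compatible scheduler can be reconstructed from the existing config
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': floats_tensor((1, 3, 128, 128), rng=random.Random(0 ) ),
            'generator': np.random.RandomState(0 ),
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return pipe(**inputs ).images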
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((768, 512) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__lowerCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = np.random.RandomState(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
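# For reference, a hedged sketch of the end-to-end GPU flow the nightly tests above
# exercise. Checkpoint id, provider, and call arguments are taken from the tests
# themselves; running it requires onnxruntime-gpu and a CUDA device.
def _example_gpu_img2img():
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/img2img/sketch-mountains-input.jpg' ).resize((768, 512) )
    pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
        'CompVis/stable-diffusion-v1-4', revision='onnx', provider='CUDAExecutionProvider' )
    generator = np.random.RandomState(0 )
    return pipe(
        prompt='A fantasy landscape, trending on artstation',
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=generator,
        output_type='np' ).images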
| 348 | 0 |
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str) -> None:
    """Create the SageMaker execution role with the permissions it needs (no-op if it already exists)."""
    iam_client = botoa.client('iam')
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"""{role_name}_policy_permission""", PolicyDocument=json.dumps(policy_document, indent=2) )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn(role_name: str) -> str:
    """Resolve an IAM role name to its ARN."""
    iam_client = botoa.client('iam')
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
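# Hedged usage sketch for the two helpers above (`_example_bootstrap_sagemaker_role`
# is an illustrative addition): create the execution role once -- idempotent thanks
# to the EntityAlreadyExists handling -- then resolve its ARN. Requires valid AWS
# credentials in the environment; the role name matches the default used by the
# interactive flow below.
def _example_bootstrap_sagemaker_role() -> str:
    _create_iam_role_for_sagemaker('accelerate_sagemaker_execution_role' )
    return _get_iam_role_arn('accelerate_sagemaker_execution_role' )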
def get_sagemaker_input() -> SageMakerConfig:
    """Interactively collect everything needed to launch a training job on Amazon SageMaker."""
    credentials_configuration = _ask_options(
        'How do you want to authorize?',
        ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default' )
        os.environ['AWS_PROFILE'] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
        aws_access_key_id = _ask_field('AWS Access Key ID: ' )
        os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id
        aws_secret_access_key = _ask_field('AWS Secret Access Key: ' )
        os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
    aws_region = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1' )
    os.environ['AWS_DEFAULT_REGION'] = aws_region
    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?',
        ['Provide IAM Role name', 'Create new IAM role using credentials'],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ' )
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x ).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x ).lower(), )
    distributed_type = _ask_options(
        'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo?[yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x )], default='default', )
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x ).lower(), default='ml.p3.2xlarge' )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            'How many machines do you want to use? [1]: ', int, default=1, )
    mixed_precision = _ask_options(
        'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, eca_instance_type=eca_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
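# Hedged sketch of consuming the interactive flow above (`_example_save_sagemaker_config`
# is an illustrative addition): collect the answers, then persist them. It assumes
# `SageMakerConfig` is a dataclass, as in this codebase; `default=str` keeps enum
# fields JSON-serializable. The output path is arbitrary.
def _example_save_sagemaker_config(path: str = 'sagemaker_config.json' ) -> None:
    import dataclasses

    config = get_sagemaker_input()
    with open(path, 'w' ) as f:
        json.dump(dataclasses.asdict(config ), f, indent=2, default=str )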
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
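# Hedged usage sketch: in upstream `transformers` this configuration class is
# `WhisperConfig`, so a smaller variant can be built by overriding the same keyword
# arguments the __init__ above accepts. The values below are arbitrary illustrations,
# not a released checkpoint, and `_example_tiny_whisper_config` is an addition.
def _example_tiny_whisper_config():
    from transformers import WhisperConfig

    return WhisperConfig(
        vocab_size=51_865,
        num_mel_bins=80,
        d_model=384,
        encoder_layers=4,
        decoder_layers=4,
        encoder_attention_heads=6,
        decoder_attention_heads=6,
        max_source_positions=1_500,
        max_target_positions=448,
    )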
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
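# Hedged sketch of how the ONNX config above is typically exercised for export:
# wrap a model config, then request dummy inputs through a processor. Upstream this
# class is `WhisperOnnxConfig`; the import path below is an assumption based on
# where transformers defines it, and the function name is an illustrative addition.
def _example_whisper_onnx_dummy_inputs():
    from transformers import WhisperConfig, WhisperProcessor
    from transformers.models.whisper.configuration_whisper import WhisperOnnxConfig  # assumed path
    from transformers.utils import TensorType

    processor = WhisperProcessor.from_pretrained('openai/whisper-base' )
    onnx_config = WhisperOnnxConfig(WhisperConfig() )
    return onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH )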
| 348 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
snake_case_ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
snake_case_ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = {
'''in_channels''': 4,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__lowerCamelCase = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.dummy_unet
__lowerCamelCase = self.dummy_movq
__lowerCamelCase = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__lowerCamelCase = DDIMScheduler(**lowerCamelCase__ )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create init_image
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((256, 256) )
if str(lowerCamelCase__ ).startswith('mps' ):
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__lowerCamelCase = output.images
__lowerCamelCase = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__lowerCamelCase = '''A red cartoon frog, 4k'''
__lowerCamelCase = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
__lowerCamelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
__lowerCamelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCamelCase = pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowerCamelCase = pipeline(
image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
__lowerCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
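# For reference, a hedged sketch of the two-stage flow the slow test above runs:
# the prior pipeline turns text into image embeddings, and the decoder denoises
# conditioned on them. Model ids and arguments come from the test itself; note the
# sketch uses `torch.float16` (the real dtype) and needs a CUDA device for fp16
# weights. `_example_kandinsky_img2img` is an illustrative addition.
def _example_kandinsky_img2img(init_image, device='cuda' ):
    pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 ).to(device )
    pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16 ).to(device )
    generator = torch.Generator(device='cpu' ).manual_seed(0 )
    image_emb, zero_image_emb = pipe_prior(
        'A red cartoon frog, 4k', generator=generator, num_inference_steps=5, negative_prompt='' ).to_tuple()
    return pipeline(
        image=init_image,
        image_embeds=image_emb,
        negative_image_embeds=zero_image_emb,
        generator=generator,
        num_inference_steps=100,
        height=768,
        width=768,
        strength=0.2,
        output_type='np' ).images[0]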
| 352 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = FlaxGPTJModelTester(self )
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
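# For reference, a hedged sketch of the batched-generation path the `tooslow` test
# above exercises: left-padded prompts and a jitted `model.generate`. Checkpoint and
# tokenizer settings are taken from the test; the 6B weights are large, and
# `_example_jit_generate` is an illustrative addition.
def _example_jit_generate(prompts ):
    tokenizer = GPTaTokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left' )
    model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
    model.config.do_sample = False  # greedy decoding, as in the test
    model.config.pad_token_id = model.config.eos_token_id
    inputs = tokenizer(prompts, return_tensors='np', padding=True, truncation=True )
    jit_generate = jax.jit(model.generate )
    sequences = jit_generate(
        inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id ).sequences
    return tokenizer.batch_decode(sequences, skip_special_tokens=True )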
| 348 | 0 |