"""Convert a HF Diffusers saved pipeline to a Stable Diffusion checkpoint."""

import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to Stable Diffusion checkpoint keys."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to Stable Diffusion checkpoint keys."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert an OpenCLIP (SD v2.x) text encoder state dict, fusing q/k/v projections."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # the v1.x CLIP text encoder needs no key renaming
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; the default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if they exist, otherwise from pytorch bins
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
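
# Illustrative usage sketch (paths and filename are assumptions, not part of
# the script above). Typical invocation:
#
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half
#
# The mapping helpers can also be exercised directly; the function below is an
# added, uncalled demo on a made-up two-tensor state dict (real key names,
# placeholder tensors).
def _toy_conversion_example() -> None:
    fake_unet = {
        "time_embedding.linear_1.weight": torch.zeros(2, 2),
        "conv_in.bias": torch.zeros(2),
    }
    # keys become "time_embed.0.weight" and "input_blocks.0.0.bias"
    print(sorted(convert_unet_state_dict(fake_unet)))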
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
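
# Hedged usage sketch (script filename and output path are illustrative):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./karlo-image-variation
#
# after which the dumped pipeline can be reloaded with
# UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variation").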
"""LLaMA model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
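

# Added, uncalled sketch of how the validation above behaves (values are
# illustrative, not tied to any released checkpoint).
def _rope_scaling_examples() -> None:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        LlamaConfig(rope_scaling={"type": "xpos", "factor": 2.0})  # unsupported type
    except ValueError as err:
        print(err)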
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
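
# Downstream code imports through the lazy module exactly as if everything
# were defined eagerly; a hedged sketch (the model classes sit behind the
# is_torch_available() guard above, so this needs torch installed):
#
#   from transformers.models.autoformer import AutoformerConfig, AutoformerModel
#   model = AutoformerModel(AutoformerConfig())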
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # fmt: off
    expected_src_tokens = [134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
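
# A hedged sketch of the tokenizer behaviour the integration tests above rely
# on (downloads the real uclanlp/plbart-python-en_XX checkpoint):
#
#   from transformers import PLBartTokenizer
#   tok = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#   )
#   ids = tok("def f(a):NEW_LINE_INDENTreturn a").input_ids
#   # source sequences end with [eos (=2), __python__ (=50002)]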
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
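
# Added, uncalled sketch of what create_dummy_object renders for each name
# shape (ALL_CAPS -> constant template, lowercase -> function template,
# CamelCase -> class template); the example names are illustrative.
def _dummy_object_examples() -> None:
    print(create_dummy_object("FLAX_WEIGHTS_NAME", '["torch"]'))  # `FLAX_WEIGHTS_NAME = None`
    print(create_dummy_object("load_model", '["torch"]'))  # requires_backends stub function
    print(create_dummy_object("UNet2DModel", '["torch"]'))  # DummyObject-metaclass stub class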
"""
Convolutional neural network implemented with numpy only: one convolution
layer, one pooling layer and a two-layer BP network, with pickle-based
save/load of the trained parameters.
"""

import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three-dimension data to a one-dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_conved1, data_featuremap1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_featuremap1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_featuremap1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_featuremap1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_conved1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_conved1, data_featuremap1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_featuremap1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of the image after the convolution process so we can check it out
        data_test = np.asmatrix(data)
        data_conved1, data_featuremap1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_featuremap1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
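
# Added, uncalled end-to-end demo of the API above. The shapes are chosen so
# the layers line up (6x6 input, 3x3 kernels, stride 1 -> 4x4 feature maps,
# pooled by 2 -> 2x2; with 2 kernels the flatten layer sees 2*2*2 = 8 units).
# The data is random, so this demonstrates the call sequence, not learning.
def _demo() -> None:
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=4, bp_num3=2)
    datas_train = [np.random.rand(6, 6) for _ in range(3)]
    datas_teach = [np.random.rand(2) for _ in range(3)]
    cnn.train(
        patterns=3,
        datas_train=datas_train,
        datas_teach=datas_teach,
        n_repeat=2,
        error_accuracy=0.1,
        draw_e=False,
    )
    print(cnn.predict([np.random.rand(6, 6)]))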
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message shifted by every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
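
# Worked example: "IFMMP" is "HELLO" Caesar-shifted by one, so calling
# decrypt("IFMMP") prints 26 candidate lines, and the key-1 line reads:
#
#   Decryption using Key #1: HELLO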
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers, returning all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
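
# Sanity check from the problem statement: below a ceiling of 100, the longest
# run of consecutive primes summing to a prime is 2+3+5+7+11+13 = 41 (six
# terms), so solution(100) returns 41.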
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
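
# A hedged sketch of the inference path the slow test exercises (needs a GPU
# and downloads real checkpoints; prompt, mask and image are illustrative):
#
#   prior = KandinskyPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
#   pipe = KandinskyInpaintPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16).to("cuda")
#   image_emb, zero_emb = prior("a hat").to_tuple()
#   out = pipe("a hat", image=init_image, mask_image=mask,
#              image_embeds=image_emb, negative_image_embeds=zero_emb)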
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase : Optional[Any] = get_tests_dir('''fixtures''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[str] = mock.Mock()
_lowerCamelCase : Union[str, Any] = 500
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : str = HTTPError
_lowerCamelCase : Dict = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" ,return_value=__lowerCAmelCase ) as mock_head:
_lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def _lowercase ( cls: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def _lowercase ( cls: Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("test-feature-extractor" ,use_auth_token=self._token )
_lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCAmelCase ,repo_id="test-feature-extractor" ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token )
_lowerCamelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" ,use_auth_token=self._token )
_lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCAmelCase ,repo_id="valid_org/test-feature-extractor-org" ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token )
_lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
_lowerCamelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map ,{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} ,)
_lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" ,trust_remote_code=__lowerCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ ,"CustomFeatureExtractor" ) | 46 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__(self):
        '''simple docstring'''
        return F'''Node({self.data})'''
class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None
    def __iter__(self):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        '''simple docstring'''
        return sum(1 for _ in self)
    def __repr__(self):
        '''simple docstring'''
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        '''simple docstring'''
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        '''simple docstring'''
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        '''simple docstring'''
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        '''simple docstring'''
        print(self)
    def delete_head(self):
        '''simple docstring'''
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        '''simple docstring'''
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        '''simple docstring'''
        return self.head is None
    def reverse(self):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
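# Minimal usage sketch (illustrative only; mirrors the tests below):
#     lst = LinkedList()
#     lst.insert_tail(1)
#     lst.insert_tail(2)
#     str(lst)        # '1->2'
#     lst.reverse()
#     str(lst)        # '2->1'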
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'''Element at Position 1: {linked_list[1]}''')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'''length of linked_list is : {len(linked_list)}''')
if __name__ == "__main__":
main()
| 1 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''bert_for_seq_generation''': (
            '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file: str, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
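# Minimal usage sketch (assumes a local SentencePiece model file; "spiece.model"
# and "out_dir" are placeholders, not files shipped with this module):
#     tok = BertGenerationTokenizer(vocab_file="spiece.model")
#     pieces = tok._tokenize("hello world")   # raw SentencePiece pieces
#     tok.save_vocabulary("out_dir")          # copies spiece.model into out_dir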
| 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
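# Note: only `_import_structure` is evaluated at import time; `_LazyModule`
# defers loading the torch-backed modeling classes until an attribute such as
# `UniSpeechModel` is first accessed.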
| 1 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        """simple docstring"""
        return not self.alibi
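# Minimal usage sketch (assumes the standard PretrainedConfig workflow):
#     config = FalconConfig()
#     config.head_dim   # 4544 // 71 == 64
#     config.rotary     # True, since `alibi` defaults to False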
| 48 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """simple docstring"""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded
def decode(coded: str) -> str:
    """simple docstring"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
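# Example round trip with the table above:
#     encode("hello")                       -> 'AABBBAABAAABABAABABAABBAB'
#     decode("AABBBAABAAABABAABABAABBAB")   -> 'hello'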
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """simple docstring"""
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32
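# Example: sum_aa(0xFFFFFFFF, 1) == 0 -- addition wraps modulo 2**32.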
def left_rotate_aa(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
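# Example: left_rotate_aa(1, 1) == 2; bits shifted past position 31 wrap around
# to the low end, so the result always fits in 32 bits.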
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
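# Known MD5 test vectors for a quick sanity check:
#     md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
#     md5_me(b"The quick brown fox jumps over the lazy dog") == b"9e107d9d372bb6826bd81d3542a419d6"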
| 1 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
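# With the defaults this evaluates 7 * (1 - C(60, 20) / C(70, 20)), the expected
# number of distinct colours among 20 balls drawn from 70, which is ~6.818741802.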
if __name__ == "__main__":
print(solution(20))
| 50 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self):
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)
    def __lt__(self, other: Node):
        '''simple docstring'''
        return self.f_cost < other.f_cost
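    # Because nodes compare by f_cost, sorting open_nodes and popping index 0
    # always expands the cheapest candidate first (a list acting as a priority queue).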
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]
    def get_successors(self, parent: Node):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors
    def retrace_path(self, node: Node | None):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self):
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node):
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained('''microsoft/mpnet-base''')
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained('''microsoft/mpnet-base''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 51 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i: int) -> int: # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend('''spark'''):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def test_map_nested_num_proc(num_proc):
    s1 = [1, 2]
    s2 = {'''a''': 1, '''b''': 2}
    s3 = {'''a''': [1, 2], '''b''': [3, 4]}
    s4 = {'''a''': {'''1''': 1}, '''b''': 2}
    s5 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'''a''': 2, '''b''': 3}
    expected_map_nested_s3 = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_s4 = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_s5 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend('''spark'''):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5 | 52 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
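# Each batch from these loaders is a dict of padded tensors (input_ids,
# attention_mask, labels, ...) that can be fed to the model as model(**batch).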
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
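    # DummyOptim/DummyScheduler are accelerate placeholders: when the DeepSpeed
    # config file itself specifies an optimizer/scheduler, the real objects are
    # created by DeepSpeed and these stand-ins just carry the hyper-parameters.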
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 1 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def lowercase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Any ) -> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
__lowerCAmelCase = ViTMAEModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> List[str]:
__lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
__lowerCAmelCase = (self.image_size // self.patch_size) ** 2
__lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(lowerCAmelCase_ )
__lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def lowercase ( self : List[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def lowercase ( self : List[str] ) -> Dict:
pass
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowercase ( self : int ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_determinism(self):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def test_model_outputs_equivalence(self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
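# A minimal sketch (not from the test file above) of the noise-driven masking
# these tests pin down by seeding NumPy/torch and passing an explicit `noise`
# tensor: sorting a fixed noise vector yields a deterministic patch shuffle,
# so PT and TF models can be compared on identical masks. Names are illustrative.
def random_masking(patches, noise, mask_ratio=0.75):
    # patches: (batch, num_patches, dim); noise: (batch, num_patches) in [0, 1)
    batch, num_patches, dim = patches.shape
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = torch.argsort(noise, dim=1)    # ascending: low-noise patches are kept
    ids_restore = torch.argsort(ids_shuffle, dim=1)
    ids_keep = ids_shuffle[:, :len_keep]
    kept = torch.gather(patches, 1, ids_keep.unsqueeze(-1).expand(-1, -1, dim))
    mask = torch.ones(batch, num_patches, device=patches.device)
    mask[:, :len_keep] = 0
    mask = torch.gather(mask, 1, ids_restore)    # 1 marks a masked-out patch
    return kept, mask, ids_restore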
| 53 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester(TestCasePlus):
@slow
@require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        train_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        val_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='steps', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
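        # Illustrative follow-up (not part of the original test): summarize one
        # toy input with the just-trained encoder-decoder. Generation settings
        # are arbitrary; decoder_start_token_id was set on the config above.
        article = 'The quick brown fox jumps over the lazy dog.'
        sample = tokenizer(article, return_tensors='pt')
        generated = bert2bert.generate(sample.input_ids, attention_mask=sample.attention_mask, max_length=32, num_beams=2)
        print(tokenizer.decode(generated[0], skip_special_tokens=True))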
| 1 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
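    # Minimal sanity check (not in the original file): on a weighted 3-cycle
    # the minimum spanning tree drops the heaviest edge.
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    assert sum(demo.prims_algorithm().edges.values()) == 3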
print(f"""{solution() = }""")
| 54 |
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
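    # Closed-form cross-check (standard identities, not in the original file):
    # sum 1..n = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
    n = 100
    assert solution(n) == (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6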
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
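# What the lazy pattern above buys, sketched as a hypothetical session: the
# heavy modeling module is imported only when one of its symbols is touched.
#   import transformers.models.lxmert as lxmert
#   config = lxmert.LxmertConfig()      # modeling_lxmert not imported yet
#   model = lxmert.LxmertModel(config)  # first touch triggers the real import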
| 55 |
def euclidean_distance_sqr(point1, point2) -> int:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])
def dis_between_closest_pair(points, points_counts, min_dis=float('inf')) -> float:
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float('inf')) -> float:
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
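    # Brute-force O(n^2) cross-check (not part of the original file): both
    # methods must agree on the minimum distance for this small sample.
    from itertools import combinations
    brute = min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5
    assert abs(brute - closest_pair_of_points(points, len(points))) < 1e-9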
| 1 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """simple docstring"""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
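    # Illustrative follow-up (not in the original file): label each vertex with
    # its component index and collapse edges to obtain the condensation DAG.
    comp_of = {v: i for i, comp in enumerate(tarjan(g)) for v in comp}
    condensed = {(comp_of[u], comp_of[v]) for u, v in edges if comp_of[u] != comp_of[v]}
    print(condensed)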
| 56 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = """bert"""
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
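# Illustrative usage of the two classes above (not part of the original
# module): build a smaller config and inspect the ONNX dynamic axes.
#   config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   onnx_config = BertOnnxConfig(config)
#   print(onnx_config.inputs)  # axes for input_ids / attention_mask / token_type_ids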
| 1 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
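# Illustration (hypothetical key) of the attribute walk set_recursively does:
#   pointer = hf_model
#   for attr in 'encoder.layers.0.attention.k_proj'.split('.'):
#       pointer = getattr(pointer, attr)
#   pointer.weight.data = value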
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 57 |
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
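    # Quick sanity checks (values follow directly from the base-26 scheme above):
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28
    assert excel_title_to_column("ZZ") == 702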
| 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
            outputs = model(np.ones((1, 1) ))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape) , expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
| 58 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True).raw ).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''' )
        hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
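    # Hypothetical invocation (script name and paths illustrative):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #     --pytorch_dump_folder_path ./blip2-opt-2.7b-hf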
| 1 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    """simple docstring"""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
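    # Hypothetical command lines served by the entry point above (flags after
    # the subcommand are forwarded as key/value pairs by parse_unknown_args):
    #   datasets-cli env
    #   datasets-cli test ./my_dataset --save_infos --all_configs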
| 59 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    task_type: Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    labels: Optional[str] = field(
        default=None , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    """simple docstring"""
    main()
if __name__ == "__main__":
main()
| 1 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
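# For example, the 4-cycle {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]} is bipartite
# (U = {0, 2}, V = {1, 3}), while a triangle {0: [1, 2], 1: [0, 2], 2: [0, 1]} is not:
# any 2-coloring of a triangle forces two adjacent vertices into the same set.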
def check_bipartite_dfs(graph ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 60 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*_lowercase ) -> None:
    """simple docstring"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_lowercase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
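# Example: printflock(f"{gpu} is OK") serializes output across ranks by taking an
# exclusive flock on this script file, so lines printed by different processes
# do not interleave in the shared log.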
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def a ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def a ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase__ = self.dummy_uncond_unet
lowerCAmelCase__ = ScoreSdeVeScheduler()
lowerCAmelCase__ = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE__ ).images
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[
0
]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : int ) -> Union[str, Any]:
lowerCAmelCase__ = "google/ncsnpp-church-256"
lowerCAmelCase__ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=SCREAMING_SNAKE_CASE__ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 61 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config , items ):
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config ):
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
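# With the autouse fixtures above, every test session gets a fresh HF cache rooted at
# pytest's base temp dir, e.g. datasets.config.HF_DATASETS_CACHE resolves to
# <basetemp>/cache/datasets instead of the user's real cache.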
| 1 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
    model_type = '''donut-swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
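        # Worked example with the defaults above: embed_dim=96 and depths=[2, 2, 6, 2]
        # give hidden_size = 96 * 2 ** (4 - 1) = 768.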
| 62 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple=2 , __lowercase : Any=32 , __lowercase : Any=16 , __lowercase : str=3 , __lowercase : Dict=True , __lowercase : Tuple=True , __lowercase : Any=32 , __lowercase : Dict=4 , __lowercase : List[str]=[0, 1, 2, 3] , __lowercase : Optional[int]=4 , __lowercase : Dict=37 , __lowercase : Union[str, Any]="gelu" , __lowercase : str=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=0.02 , __lowercase : Dict=3 , __lowercase : Optional[int]=[1, 384, 24, 24] , __lowercase : List[str]=True , __lowercase : Any=None , ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Tuple = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Tuple = backbone_out_indices
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : List[str] = backbone_featmap_shape
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
__UpperCAmelCase : Optional[int] = num_patches + 1
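        # Worked example with the defaults above: image_size=32 and patch_size=16 give
        # num_patches = (32 // 16) ** 2 = 4 and seq_length = 4 + 1 = 5 (patches + [CLS]).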
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase : Optional[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Optional[int] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = DPTModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : str , __lowercase : List[Any] , __lowercase : str , __lowercase : str ) -> int:
__UpperCAmelCase : Union[str, Any] = self.num_labels
__UpperCAmelCase : Optional[int] = DPTForDepthEstimation(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self : int , __lowercase : Any , __lowercase : Dict , __lowercase : List[str] ) -> str:
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Any = DPTForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : List[str] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : List[str] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : Optional[Any] = False
a : Dict = False
a : Dict = False
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = DPTModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def UpperCAmelCase ( self : str ) -> Optional[int]:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def UpperCAmelCase ( self : Optional[int] ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowercase )
__UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = True
if model_class in get_values(__lowercase ):
continue
__UpperCAmelCase : List[str] = model_class(__lowercase )
model.to(__lowercase )
model.train()
__UpperCAmelCase : int = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : List[str] = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Any ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = True
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
model.to(__lowercase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : Optional[Any] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__UpperCAmelCase : Tuple = model(**__lowercase ).loss
loss.backward()
def UpperCAmelCase ( self : Dict ) -> int:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(config=__lowercase )
# Skip the check for the backbone
__UpperCAmelCase : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__UpperCAmelCase : Any = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
pass
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__UpperCAmelCase : Tuple = DPTModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCAmelCase ( self : int ) -> int:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : str = """add"""
with self.assertRaises(__lowercase ):
__UpperCAmelCase : Union[str, Any] = DPTForDepthEstimation(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Optional[int] ) -> str:
__UpperCAmelCase : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__UpperCAmelCase : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__lowercase )
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Optional[int] = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**__lowercase )
__UpperCAmelCase : List[Any] = outputs.predicted_depth
# verify the predicted depth
__UpperCAmelCase : Dict = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __lowercase )
__UpperCAmelCase : int = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __lowercase , atol=1e-4 ) )
| 63 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__snake_case = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 1 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "MCTCTFeatureExtractor"
__a = "AutoTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
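# Hedged usage sketch of the deprecated pattern the context manager above keeps working
# (variable names are illustrative):
#     with processor.as_target_processor():
#         labels = processor("some transcription").input_ids
# The recommended replacement is a single call: processor(audio=waveform, text=transcription).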
| 64 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode( plain ):
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded ):
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main():
    '''simple docstring'''
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 65 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend(line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
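# For illustration (hypothetical inputs): create_dummy_object('UNet2DModel', '["torch"]')
# renders DUMMY_CLASS into a placeholder class whose constructor, from_config and
# from_pretrained all raise unless the torch backend is installed.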
def create_dummy_files(backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , f'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
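# Typical invocations: `python utils/check_dummies.py` verifies the dummy files are in
# sync with the main __init__, and `python utils/check_dummies.py --fix_and_overwrite`
# regenerates them in place.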
| 1 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ) -> None:
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
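# Numeric sketch of the "l0" branch above (hard-concrete stretch-and-clamp): with
# l, r = -0.1, 1.1, a mask score of 0 maps to sigmoid(0) = 0.5 and then to
# 0.5 * 1.2 - 0.1 = 0.5, while a strongly negative score saturates below 0 and is
# clamped to exactly 0, pruning that weight outright.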
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
UpperCamelCase = parser.parse_args()
main(args)
| 66 |
import string
def decrypt(message ) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
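# Worked example: for the ciphertext "KHOOR" ("HELLO" shifted by 3), the brute force
# above prints "Decryption using Key #3: HELLO" among its 26 candidates.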
def main() -> None:
    """simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
    model_type = '''vision-encoder-decoder'''
    is_composition = True
    def __init__( self ,**kwargs ):
        super().__init__(**kwargs )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls ,encoder_config: PretrainedConfig ,decoder_config: PretrainedConfig ,**kwargs ) -> PretrainedConfig:
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs )
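    # Hedged usage sketch (the "vit"/"gpt2" model types are illustrative, not from this file):
    #     encoder_cfg = AutoConfig.for_model('vit' )
    #     decoder_cfg = AutoConfig.for_model('gpt2' )
    #     config = A_.from_encoder_decoder_configs(encoder_cfg ,decoder_cfg )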
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class A_ ( UpperCAmelCase ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : int ) -> float:
return 1e-4
@property
def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
_lowercase = OrderedDict()
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_lowercase = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : "PreTrainedTokenizerBase" ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional["TensorType"] = None ,) -> Mapping[str, Any]:
import torch
_lowercase = OrderedDict()
_lowercase = super().generate_dummy_inputs(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
_lowercase , _lowercase = dummy_input['input_ids'].shape
_lowercase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowercase = dummy_input.pop('input_ids' )
_lowercase = dummy_input.pop('attention_mask' )
_lowercase = torch.zeros(__A )
return common_inputs
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[str] ) -> None:
pass
def __UpperCAmelCase ( self : List[str] ,__A : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(__A )
def __UpperCAmelCase ( self : Dict ,__A : PretrainedConfig ,__A : PretrainedConfig ,__A : str = "default" ) -> OnnxConfig:
_lowercase = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(__A ,__A )
| 67 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
            # out_channels is double the latent channels (2 * 4 = 8) because the UNet
            # predicts both the mean and the variance of the noise
'out_channels': 8,
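            # in_channels of 9 likely packs 4 latent + 4 masked-image latent + 1 mask channel
            # (an assumption from the inpainting setup, not stated in this file)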
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
__UpperCamelCase = np.ones((64, 64),dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
| 1 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any:
__UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Optional[int]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Dict ) -> int:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
'''simple docstring'''
return F'''Node({self.data})'''
class __lowerCamelCase :
def __init__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: int ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Any ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: int,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: int,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[Any],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Optional[Any],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
def snake_case_ ( self: str ): # print every node data
'''simple docstring'''
print(self )
def snake_case_ ( self: int ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: str ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: Any,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
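            # Walk to the node just before `index`, then bypass the node being removed.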
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> list:
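    # KMP prefix function: prefix_result[i] is the length of the longest proper
    # prefix of input_string[: i + 1] that is also a suffix of that slice.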
__snake_case = [0] * len(_UpperCAmelCase )
for i in range(1 , len(_UpperCAmelCase ) ):
# use last results for better performance - dynamic programming
__snake_case = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__snake_case = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__snake_case = j
return prefix_result
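# Hand-checked example (not part of the module's doctests):
# prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]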
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> int:
return max(prefix_function(_UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import json
import subprocess
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
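    # Query the GitHub Actions runners API with curl and collect every target
    # runner whose reported status is "offline".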
lowerCamelCase_ = []
lowerCamelCase_ = (
f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowerCamelCase_ = subprocess.run(lowercase , shell=lowercase , stdout=subprocess.PIPE )
lowerCamelCase_ = output.stdout.decode('utf-8' )
lowerCamelCase_ = json.loads(lowercase )
lowerCamelCase_ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(lowercase )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(lowercase ) )
if len(lowercase ) > 0:
lowerCamelCase_ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ):
'''simple docstring'''
return values.split(',' )
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
lowerCamelCase : Optional[int] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
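        # Each group of five A/B symbols decodes to one plaintext character.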
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[Any] ="t5"
__A : List[Any] =["past_key_values"]
__A : int ={"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self ,_snake_case=3_21_28 ,_snake_case=5_12 ,_snake_case=64 ,_snake_case=20_48 ,_snake_case=6 ,_snake_case=None ,_snake_case=8 ,_snake_case=32 ,_snake_case=1_28 ,_snake_case=0.1 ,_snake_case=1E-6 ,_snake_case=1.0 ,_snake_case="relu" ,_snake_case=True ,_snake_case=True ,_snake_case=0 ,_snake_case=1 ,**_snake_case ,):
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = d_kv
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : str = num_layers
UpperCAmelCase_ : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : List[str] = relative_attention_num_buckets
UpperCAmelCase_ : int = relative_attention_max_distance
UpperCAmelCase_ : Union[str, Any] = dropout_rate
UpperCAmelCase_ : Optional[Any] = layer_norm_epsilon
UpperCAmelCase_ : Any = initializer_factor
UpperCAmelCase_ : List[Any] = feed_forward_proj
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Any = self.feed_forward_proj.split("-" )
UpperCAmelCase_ : Union[str, Any] = act_info[-1]
UpperCAmelCase_ : str = act_info[0] == "gated"
if len(_snake_case ) > 1 and act_info[0] != "gated" or len(_snake_case ) > 2:
raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : Any = "gelu_new"
super().__init__(
pad_token_id=_snake_case ,eos_token_id=_snake_case ,is_encoder_decoder=_snake_case ,**_snake_case ,)
class _snake_case (__SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase_ : Union[str, Any] = "past_encoder_sequence + sequence"
UpperCAmelCase_ : Any = {0: "batch"}
UpperCAmelCase_ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_snake_case ,direction="inputs" )
return common_inputs
@property
def UpperCamelCase__ ( self ):
return 13
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
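    # Reassemble the 32-bit word with its four 8-bit bytes in reversed order.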
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
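    # (append a 1 bit, zero-fill until the length is 448 mod 512, then append
    # the original message length as a 64-bit value in little-endian word order)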
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCamelCase = 0X67_45_23_01
__UpperCamelCase = 0Xef_cd_ab_89
__UpperCamelCase = 0X98_ba_dc_fe
__UpperCamelCase = 0X10_32_54_76
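    # Per-round left-rotation amounts: each 16-step round repeats four values.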
    __UpperCamelCase = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
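            # Steps 0-15, 16-31, 32-47 and 48-63 use the F, G, H and I mixing
            # functions respectively, each with its own message-word schedule.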
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( lowercase_ : list[float] ) -> float:
'''simple docstring'''
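    # Parallel combination: 1 / R_eq = 1 / R_1 + 1 / R_2 + ... + 1 / R_n.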
lowercase =0.0_0
lowercase =0
for resistor in resistors:
if resistor <= 0:
lowercase =f'Resistor at index {index} has a negative or zero value!'
raise ValueError(lowercase_ )
first_sum += 1 / float(lowercase_ )
index += 1
return 1 / first_sum
def UpperCamelCase ( lowercase_ : list[float] ) -> float:
'''simple docstring'''
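    # Series combination: R_eq = R_1 + R_2 + ... + R_n.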
lowercase =0.0_0
lowercase =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase =f'Resistor at index {index} has a negative value!'
raise ValueError(lowercase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free path cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
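        # Manhattan distance when HEURISTIC == 1, Euclidean distance otherwise.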
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
# Defaults to last layer if both are None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , a , a)
self.assertEqual(a , ['c'])
self.assertEqual(a , [2])
# Out indices set to match out features
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(['a', 'c'] , a , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [0, 2])
# Out features set to match out indices
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , [0, 2] , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [0, 2])
# Out features selected from negative indices
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , [-3, -1] , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [-3, -1])
def SCREAMING_SNAKE_CASE__ ( self) -> int:
# Stage names must be set
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , a)
# Out features must be a list
with self.assertRaises(a):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'])
# Out features must be a subset of stage names
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'])
# Out indices must be a list or tuple
with self.assertRaises(a):
verify_out_features_out_indices(a , 0 , ['a', 'b'])
# Out indices must be a subset of stage names
with self.assertRaises(a):
verify_out_features_out_indices(a , (0, 1) , ['a'])
# Out features and out indices must be the same length
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'])
# Out features should match out indices
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'])
# Out features and out indices should be in order
with self.assertRaises(a):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'])
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'])
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = BackboneMixin()
SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
SCREAMING_SNAKE_CASE = ['a', 'c']
SCREAMING_SNAKE_CASE = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
SCREAMING_SNAKE_CASE = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'])
self.assertEqual(backbone.out_indices , [0, 1])
SCREAMING_SNAKE_CASE = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'])
self.assertEqual(backbone.out_indices , [-3, -1])
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
from __future__ import annotations
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if nth_term == "":
return [""]
__SCREAMING_SNAKE_CASE : int = int(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = int(snake_case )
__SCREAMING_SNAKE_CASE : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F'''1 / {pow(temp + 1 , int(snake_case ) )}''' if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input("""Enter the last number (nth term) of the P-Series"""))
lowercase_ = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
'''simple docstring'''
import math
def a__ ( ) -> None:
UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
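    # Columnar transposition: cipher column `col` collects the plaintext
    # characters at positions col, col + key, col + 2 * key, ...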
UpperCAmelCase__ : Optional[int] = [''''''] * key
for col in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = col
while pointer < len(lowerCAmelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = math.ceil(len(lowerCAmelCase__ ) / key )
UpperCAmelCase__ : Any = key
UpperCAmelCase__ : Optional[int] = (num_cols * num_rows) - len(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = [''''''] * num_cols
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase__ : Optional[int] = 0
row += 1
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = [[float('''inf''' ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
__lowercase : Tuple = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__UpperCamelCase ):
# looping through rows of graph array
for i in range(__UpperCamelCase ):
# looping through columns of graph array
for j in range(__UpperCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
__lowercase : List[Any] = dist[i][k] + dist[k][j]
_print_dist(__UpperCamelCase , __UpperCamelCase )
return dist, v
if __name__ == "__main__":
a_ = int(input('Enter number of vertices: '))
a_ = int(input('Enter number of edges: '))
a_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
a_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
a_ = int(input('Enter source:'))
a_ = int(input('Enter destination:'))
a_ = float(input('Enter weight:'))
a_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
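# Closed forms for a quick sanity check: sum(1..n) = n(n + 1) / 2 and
# sum of squares = n(n + 1)(2n + 1) / 6, so the answer equals
# (n(n + 1) / 2) ** 2 - n(n + 1)(2n + 1) / 6.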
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
def wrapper(*UpperCamelCase , **UpperCamelCase ):
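        # Time a single call of `func`; the wrapper returns the elapsed
        # seconds and discards the wrapped function's return value.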
__UpperCAmelCase : int = timeit.default_timer()
__UpperCAmelCase : int = func(*UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = timeit.default_timer() - starttime
return delta
__UpperCAmelCase : Optional[Any] = func.__name__
return wrapper
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=100 , UpperCamelCase=None ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Optional[Any] = seq_shapes or {}
for i in range(UpperCamelCase ):
__UpperCAmelCase : Union[str, Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(UpperCamelCase , _ArrayXD ):
__UpperCAmelCase : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(UpperCamelCase , datasets.Value ):
if v.dtype == "string":
__UpperCAmelCase : Optional[int] = "The small grey turtle was surprisingly fast when challenged."
else:
__UpperCAmelCase : int = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(UpperCamelCase , datasets.Sequence ):
while isinstance(UpperCamelCase , datasets.Sequence ):
__UpperCAmelCase : List[Any] = v.feature
__UpperCAmelCase : Optional[int] = seq_shapes[k]
__UpperCAmelCase : Tuple = np.random.rand(*UpperCamelCase ).astype(v.dtype )
__UpperCAmelCase : Any = data
dummy_data.append((i, example) )
return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
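# A minimal usage sketch (hypothetical output file name "dummy.arrow"; any writable
# path works), timing the write with get_duration from above:
def _benchmark_write(path="dummy.arrow"):
    features = datasets.Features(
        {"text": datasets.Value("string"), "score": datasets.Value("float32")}
    )
    write_seconds = get_duration(generate_example_dataset)(path, features, num_examples=50)
    return write_seconds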
| 77 |
def euclidean_distance_sqr(point1, point2) -> float:
    """Squared euclidean distance between two 2D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Sort a list of points by the given coordinate."""
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: closest squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest squared distance inside the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer: closest squared distance among the points."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Distance (not squared) between the closest pair of points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
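    # sanity check: the divide-and-conquer answer should match plain brute force
    brute = min(
        euclidean_distance_sqr(a, b) for i, a in enumerate(points) for b in points[i + 1 :]
    ) ** 0.5
    assert abs(brute - closest_pair_of_points(points, len(points))) < 1e-9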
| 1 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006)
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image)
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np")
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 78 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
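# Minimal usage sketch: a deliberately tiny configuration (hypothetical sizes,
# e.g. for unit tests); any BERT-family model can consume it.
def _demo_tiny_config() -> BertConfig:
    return BertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)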
| 1 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
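# Minimal usage sketch: the defaults above reproduce the van-base layout; any
# field can be overridden by keyword, e.g. a narrower variant (hypothetical sizes):
def _demo_van_config() -> VanConfig:
    return VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])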
| 79 |
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (base-26 digits with A = 1) to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
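    # examples: "A" -> 1, "Z" -> 26, "AB" -> 1 * 26 + 2 = 28
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AB") == 28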
| 1 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 80 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
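    # Example invocation (script and output paths are placeholders):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b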
| 1 | 0 |
from __future__ import annotations
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : List[str] = "MIT"
_snake_case : Tuple = "1.0.0"
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : Optional[Any] = "[email protected]"
_snake_case : Optional[Any] = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """Collects absolute link URLs for a given domain."""

    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the main domain name, e.g. "github.com" for a sub-domain URL."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the sub-domain (netloc) part of a URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the given URL and return the email addresses found on linked pages."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 81 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_unique(items: list) -> bool:
    """
    Check whether all elements of a list are distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(items)) == len(items)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the interleaved-print problem in multi-process runs by flocking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 0 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes ( self : Union[str, Any] ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 83 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config , items ) -> None:
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config ) -> None:
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ) -> Any:
    """simple docstring"""
    # Redirect every datasets cache location into a pytest-managed temp dir.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output() -> None:
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ) -> None:
    """simple docstring"""
    # Don't report download counts to the Hub during tests.
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
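# A minimal sketch (hypothetical test module, for illustration only) of how the
# auto-marker in pytest_collection_modifyitems behaves: unmarked tests are tagged
# `unit`, explicit markers are left untouched.
@pytest.mark.integration
def test_hub_roundtrip_example():
    ...  # selected with: pytest -m integration

def test_pure_logic_example():
    assert 1 + 1 == 2  # auto-marked, selected with: pytest -m unit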
| 1 | 0 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
lowercase = 0
while number:
        # Clearing the lowest set bit with `number &= number - 1` jumps straight to
        # the next 1, so the loop runs once per set bit rather than once per bit
        # (i.e. at most popcount(number) times, not 32).
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
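    # A quick worked example of the Kernighan trick (illustrative): 13 = 0b1101
    # drops its lowest set bit each pass (0b1100 -> 0b1000 -> 0), three passes total.
    assert UpperCAmelCase_(13) == 3
    assert UpperCAmelCase_(0) == 0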
| 84 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    test_attention_slicing = False
# No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self: List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs( self: Union[str, Any],device: Any,seed: int=0 ):
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case( self: Union[str, Any] ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
    def test_xformers_attention_forwardGenerator_pass( self: Any ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False,expected_max_diff=5E-3 )
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent( self: str ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical( self: Optional[Any] ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt( self: int ):
        '''simple docstring'''
        pass
    def test_progress_bar( self: Any ):
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests (unittest.TestCase ):
    def test_two_step_model( self: Tuple ):
        '''simple docstring'''
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576),generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt,video=video,generator=generator,num_inference_steps=3,output_type='pt' ).frames
        expected_array = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self : Dict , parent : Any , batch_size : int=13 , seq_length : int=7 , is_training : bool=True , use_labels : bool=False , vocab_size : int=99 , hidden_size : int=32 , num_hidden_layers : int=5 , num_attention_heads : int=4 , intermediate_size : int=37 , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=20 , eos_token_id : int=2 , pad_token_id : int=1 , bos_token_id : int=0 , )-> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self : Dict )-> str:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self : str , model_class_name : Any , config : str , inputs_dict : str )-> Union[str, Any]:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self : List[str] , model_class_name : Union[str, Any] , config : Dict , inputs_dict : Tuple )-> str:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config : Tuple , input_ids : Any , decoder_input_ids : Optional[int] , attention_mask : str=None , decoder_attention_mask : Union[str, Any]=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self : Union[str, Any] )-> None:
        """simple docstring"""
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self : Union[str, Any] )-> None:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self : List[str] )-> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self : Optional[int] )-> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self : Optional[int] )-> Any:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids : Any , attention_mask : Any=None , **kwargs : int ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self : List[Any] )-> Dict:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids : List[Any] , decoder_attention_mask : Dict , encoder_outputs : int ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self : Tuple )-> None:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary( self : Any )-> None:
        """simple docstring"""
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text , return_tensors='np' , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 85 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
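    # Example invocation (illustrative script name and paths, not from the original):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variation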
| 1 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _a ( Trainer ):
"""simple docstring"""
    def __init__( self : str , config : str=None , data_args : Union[str, Any]=None , *args : int , **kwargs : List[str] ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
" padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
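            # Assumed semantics of label_smoothed_nll_loss(lprobs, target, epsilon,
            # ignore_index): mix standard NLL with a uniform prior over the vocab,
            #   loss = (1 - eps) * nll_loss + (eps / V) * sum_of_neg_logprobs,
            # skipping positions equal to ignore_index (a sketch of the helper,
            # not its exact implementation).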
    def create_optimizer_and_scheduler( self : Dict , UpperCAmelCase : int ):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(UpperCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
    def _get_lr_scheduler( self : Tuple , UpperCAmelCase : List[Any] ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCAmelCase )
        return scheduler
    def _get_train_sampler( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self : Dict , model : Any , inputs : int , labels : str ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self : int , model : str , inputs : int ):
        labels = inputs.pop("labels" )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self : Optional[Any] , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only : bool , ignore_keys : Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["max_length"] )
        labels = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["max_length"] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self : List[str] , tensor : List[str] , max_length : Tuple ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 86 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any]) ->Any:
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**UpperCAmelCase__)
        return config
    def test_timesteps( self : List[Any]) ->None:
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas( self : Optional[Any]) ->None:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules( self : Tuple) ->None:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type( self : str) ->None:
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample( self : Dict) ->None:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding( self : Union[str, Any]) ->None:
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self : List[str]) ->None:
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices( self : Optional[Any]) ->None:
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self : List[str]) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self : Optional[int]) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self : int) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self : List[Any]) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t)
    def test_custom_timesteps_increasing_order( self : Tuple) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self : Any) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
    def test_custom_timesteps_too_large( self : Dict) ->None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 87 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend( _lowercase ) -> int:
    """simple docstring"""
    backends = _re_backend.findall(_lowercase )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ) -> Tuple:
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ) -> Union[str, Any]:
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
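# For example (illustrative call), create_dummy_object("UNet2DModel", '["torch"]')
# renders the DUMMY_CLASS template into roughly:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#         ...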
def create_dummy_files( backend_specific_objects=None ) -> Optional[Any]:
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ) -> None:
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , f'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase__ ( TestCasePlus ):
    @slow
    @require_torch
    def test_finetune_bert2bert( self ) -> None:
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""")
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""")
        val_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=True , max_length=512)
            outputs = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=True , max_length=128)
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
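        # Note on the labels mapping above: -100 is the default ignore_index of
        # PyTorch's CrossEntropyLoss, so padded positions contribute nothing to the loss.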
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="""steps""" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 88 |
import string
def decrypt( message: str ) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
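# A quick illustration (not part of the original file): decrypt("OLSSV") prints one
# candidate per key; the Key #7 line reads "HELLO", exposing the original shift.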
def main() -> None:
    """simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 0 |
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    pattern = re.compile(
        R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
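    # A few illustrative checks of the pattern (valid prefixes 0 / 94 / +94 / 0094,
    # then 7x with x in {0,1,2,4,5,6,7,8}, an optional '-' or ' ', and 7 digits):
    assert is_sri_lankan_phone_number("+94773283048")
    assert not is_sri_lankan_phone_number("+12345678901")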
| 89 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self: int ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self: str ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self: Tuple ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self: Union[str, Any] ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self: Optional[int] ):
        '''simple docstring'''
        return 100
@property
    def dummy_tokenizer( self: str ):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer
    @property
    def dummy_text_encoder( self: Any ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet( self: Any ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self: str ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self: Dict ):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=False,set_alpha_to_one=False,steps_offset=1,prediction_type='epsilon',thresholding=False,)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self: Tuple,device: Optional[int],seed: int=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64),rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0,2,3,1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64),dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint( self: Any ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ),return_dict=False,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'''image.shape {image.shape}''' )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def test_inference_batch_single_identical( self: Optional[Any] ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests (unittest.TestCase ):
    def tearDown( self: Tuple ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self: Any ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((768, 768),dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt,generator=generator,num_inference_steps=5,negative_prompt='',).to_tuple()
        output = pipeline(
            prompt,image=init_image,mask_image=mask,image_embeds=image_emb,negative_image_embeds=zero_image_emb,generator=generator,num_inference_steps=100,height=768,width=768,output_type='np',)
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image,expected_image )
| 1 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
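# Model being fit (for reference): h(x) = p0 + p1*x1 + p2*x2 + p3*x3, where
# parameter_vector = [p0, p1, p2, p3] is updated by batch gradient descent below.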
def _snake_case ( A , A="train" ) -> Optional[Any]:
return calculate_hypothesis_value(A , A ) - output(
A , A )
def _snake_case ( A ) -> int:
lowerCAmelCase__ = 0
for i in range(len(A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _snake_case ( A , A ) -> List[Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _snake_case ( A , A ) -> int:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _snake_case ( A , A=m ) -> Tuple:
lowerCAmelCase__ = 0
for i in range(A ):
if index == -1:
summation_value += _error(A )
else:
summation_value += _error(A ) * train_data[i][0][index]
return summation_value
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = summation_of_cost_derivative(A , A ) / m
return cost_derivative_value
def _snake_case ( ) -> str:
global parameter_vector
    # Tune these error limits to set the convergence tolerance for the parameter vector
lowerCAmelCase__ = 0.000_002
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
while True:
j += 1
lowerCAmelCase__ = [0, 0, 0, 0]
for i in range(0 , len(A ) ):
lowerCAmelCase__ = get_cost_derivative(i - 1 )
lowerCAmelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
A , A , atol=A , rtol=A , ):
break
lowerCAmelCase__ = temp_parameter_vector
print(('''Number of iterations:''', j) )
def _snake_case ( ) -> Union[str, Any]:
for i in range(len(A ) ):
print(('''Actual output value:''', output(A , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(A , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent() | 90 |
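# The row above is a half-obfuscated batch-gradient-descent demo for a linear
# hypothesis h(x) = theta0 + theta1*x1 + ...; below is a cleaned-up, runnable
# sketch of the same idea. The data and learning rate are taken from the row;
# everything else (names, iteration cap, loss check) is illustrative.
import numpy as np

train_x = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1]], dtype=float)
train_y = np.array([15, 25, 41, 8], dtype=float)
X = np.hstack([np.ones((len(train_x), 1)), train_x])  # prepend a bias column
theta = np.zeros(X.shape[1])
learning_rate = 0.009

initial_loss = np.mean((X @ theta - train_y) ** 2)
for _ in range(10_000):
    error = X @ theta - train_y          # hypothesis minus target, per example
    grad = X.T @ error / len(train_y)    # batch gradient of the squared loss
    new_theta = theta - learning_rate * grad
    if np.allclose(theta, new_theta, atol=2e-6):
        break
    theta = new_theta

assert np.mean((X @ theta - train_y) ** 2) < initial_loss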
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
'''simple docstring'''
return F'''Node({self.data})'''
class __lowerCamelCase :
def __init__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: int ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Any ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: int,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: int,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[Any],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Optional[Any],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
def snake_case_ ( self: str ): # print every node data
'''simple docstring'''
print(self )
def snake_case_ ( self: int ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: str ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: Any,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
| 1 | 0 |
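# The linked-list row above centers on in-place reversal; here is the same
# pointer-flipping idea as a tiny self-contained sketch (names illustrative).
class _Node:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

def reverse(head):
    prev = None
    while head:
        head.next, prev, head = prev, head, head.next  # flip one link per step
    return prev

head = _Node(1, _Node(2, _Node(3)))
head = reverse(head)
out = []
while head:
    out.append(head.data)
    head = head.next
assert out == [3, 2, 1]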
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 91 |
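# The rows above use transformers' _LazyModule to defer heavy imports until
# first attribute access. A minimal generic sketch of that pattern follows;
# it is an illustration built on importlib, not the actual transformers
# internals, and the module/attribute names are stand-ins.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later accesses skip __getattr__
        return value

lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0  # math is imported only here, on first use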
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[int]:
    # get the image dimensions (height, width)
lowercase , lowercase : List[Any] =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__magic_name__ ):
for j in range(__magic_name__ ):
lowercase : str =[255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
UpperCamelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCamelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 92 |
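# The loop above inverts one pixel at a time; NumPy can compute the same
# negative in a single vectorized expression. A minimal sketch on synthetic
# data, so no image file is required.
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)  # stand-in image
negative = 255 - img  # elementwise inversion of every channel
assert (255 - negative == img).all()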
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
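# A minimal round-trip sketch of the Baconian scheme above, using a tiny
# illustrative two-letter alphabet so it stays self-contained.
ENC = {"a": "AAAAA", "b": "AAAAB", " ": " "}
DEC = {v: k for k, v in ENC.items()}

def encode(word: str) -> str:
    return "".join(ENC[ch] for ch in word.lower())

def decode(coded: str) -> str:
    out = []
    for group in coded.split():
        out.append("".join(DEC[group[i : i + 5]] for i in range(0, len(group), 5)))
    return " ".join(out)

assert decode(encode("ab ba")) == "ab ba"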
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :int
__magic_name__ :int
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :list[list[Edge]] = [[] for _ in range(__UpperCAmelCase )]
lowerCAmelCase__ :Any = size
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def snake_case ( self ):
'''simple docstring'''
return self._size
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(__UpperCAmelCase , __UpperCAmelCase ) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = deque([start_vertex] )
lowerCAmelCase__ :list[int | None] = [None] * self.size
lowerCAmelCase__ :Any = 0
while queue:
lowerCAmelCase__ :Union[str, Any] = queue.popleft()
lowerCAmelCase__ :Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase__ :Any = current_distance + edge.weight
lowerCAmelCase__ :Optional[int] = distances[edge.destination_vertex]
if (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase__ :Dict = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
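# A compact, runnable sketch of the 0-1 BFS idea implemented above: with edge
# weights restricted to {0, 1}, a deque replaces the priority queue (0-weight
# edges go to the front, 1-weight edges to the back). The graph is illustrative.
from collections import deque

def zero_one_bfs(adjacency: list[list[tuple[int, int]]], start: int) -> list[float]:
    distances = [float("inf")] * len(adjacency)
    distances[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adjacency[u]:  # w is 0 or 1
            if distances[u] + w < distances[v]:
                distances[v] = distances[u] + w
                if w == 0:
                    queue.appendleft(v)
                else:
                    queue.append(v)
    return distances

graph = [[(1, 0), (2, 1)], [(2, 1)], []]  # edges: 0-(0)->1, 0-(1)->2, 1-(1)->2
assert zero_one_bfs(graph, 0) == [0, 0, 1]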
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCamelCase = 0X67_45_23_01
__UpperCamelCase = 0Xef_cd_ab_89
__UpperCamelCase = 0X98_ba_dc_fe
__UpperCamelCase = 0X10_32_54_76
    __UpperCamelCase = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
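# The row above appears to follow RFC 1321; as an independent sanity sketch,
# the padding rule can be checked on its own: the padded bit string is always
# a multiple of 512 bits, with the original length stored in the final 64 bits.
# The helper name below is illustrative.
import hashlib

def padded_bit_length(message_bytes: int) -> int:
    bits = message_bytes * 8
    total = bits + 1  # the mandatory '1' bit
    while total % 512 != 448:
        total += 1
    return total + 64  # 64-bit little-endian length field

assert all(padded_bit_length(n) % 512 == 0 for n in range(200))
# Reference digest for cross-checking any RFC 1321 implementation:
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"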
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[str] , UpperCAmelCase : bool = True , UpperCAmelCase : int = 32 , UpperCAmelCase : Any=PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , **UpperCAmelCase : Dict , ) -> None:
'''simple docstring'''
lowercase : Dict =do_resize
lowercase : Optional[int] =do_rescale
lowercase : Tuple =size_divisor
lowercase : List[str] =resample
super().__init__(**UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[ChannelDimension] = None , **UpperCAmelCase : Any ) -> np.ndarray:
'''simple docstring'''
lowercase , lowercase : Any =get_image_size(UpperCAmelCase )
# Rounds the height and width down to the closest multiple of size_divisor
lowercase : Optional[Any] =height // size_divisor * size_divisor
lowercase : Union[str, Any] =width // size_divisor * size_divisor
lowercase : List[str] =resize(UpperCAmelCase , (new_h, new_w) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
return image
def A__ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : float , UpperCAmelCase : Optional[ChannelDimension] = None , **UpperCAmelCase : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(image=UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[TensorType, str]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Dict , ) -> BatchFeature:
'''simple docstring'''
lowercase : List[Any] =do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Optional[int] =size_divisor if size_divisor is not None else self.size_divisor
lowercase : Optional[Any] =resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowercase : List[str] =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] =[to_numpy_array(UpperCAmelCase ) for img in images]
if do_resize:
lowercase : int =[self.resize(UpperCAmelCase , size_divisor=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Any =[self.rescale(UpperCAmelCase , scale=1 / 255 ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : List[str] ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 94 |
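# The resize step above rounds height and width down to the nearest multiple
# of size_divisor; a minimal standalone sketch of that rounding rule.
def round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple[int, int]:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert round_down_to_multiple(1080, 1919, 32) == (1056, 1888)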
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 0 |
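# A compact heapq-based A* sketch for the same grid problem (Manhattan
# heuristic, 4-connected moves); an illustration of the algorithm, not a
# drop-in replacement for the classes above.
import heapq

def a_star(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]) -> int:
    """Return the number of steps on a shortest path, or -1 if unreachable."""
    rows, cols = len(grid), len(grid[0])

    def h(pos: tuple[int, int]) -> int:
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

    open_heap = [(h(start), 0, start)]
    best_g = {start: 0}
    while open_heap:
        _, g, pos = heapq.heappop(open_heap)
        if pos == goal:
            return g
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nxt = (pos[0] + dy, pos[1] + dx)
            if 0 <= nxt[0] < rows and 0 <= nxt[1] < cols and grid[nxt[0]][nxt[1]] == 0:
                if g + 1 < best_g.get(nxt, float("inf")):
                    best_g[nxt] = g + 1
                    heapq.heappush(open_heap, (g + 1 + h(nxt), g + 1, nxt))
    return -1

assert a_star([[0, 0], [1, 0]], (0, 0), (1, 1)) == 2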
"""simple docstring"""
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = [
[],
[],
[],
]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(lowerCAmelCase_ )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self : int ) -> str:
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class UpperCamelCase_ :
def __init__( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = []
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
UpperCAmelCase_ : Union[str, Any] = min(self.queue )
self.queue.remove(lowerCAmelCase_ )
return data
def __str__( self : Dict ) -> str:
return str(self.queue )
def snake_case ( ):
UpperCAmelCase_ : Dict = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,1_00 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,1_28 )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def snake_case ( ):
UpperCAmelCase_ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 95 |
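# The fixed-priority queue above scans per-priority bucket lists; the standard
# library's heapq gives the same FIFO-within-priority behaviour with a
# tie-breaking counter. A minimal sketch, names illustrative.
import heapq
from itertools import count

class HeapPriorityQueue:
    def __init__(self) -> None:
        self._heap: list[tuple[int, int, int]] = []
        self._counter = count()  # preserves insertion order within a priority

    def enqueue(self, priority: int, data: int) -> None:
        heapq.heappush(self._heap, (priority, next(self._counter), data))

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)[2]

q = HeapPriorityQueue()
for prio, item in [(2, 1), (0, 10), (1, 70)]:
    q.enqueue(prio, item)
assert [q.dequeue() for _ in range(3)] == [10, 70, 1]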
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # Check that the fake head request was actually called
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "xlnet"
UpperCAmelCase__ = ["mems"]
UpperCAmelCase__ = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __snake_case : int=3_2_0_0_0 , __snake_case : Tuple=1_0_2_4 , __snake_case : List[str]=2_4 , __snake_case : Dict=1_6 , __snake_case : Tuple=4_0_9_6 , __snake_case : str="gelu" , __snake_case : Dict=True , __snake_case : Optional[Any]="bi" , __snake_case : Any=0.02 , __snake_case : Tuple=1E-12 , __snake_case : List[str]=0.1 , __snake_case : Any=5_1_2 , __snake_case : Optional[int]=None , __snake_case : Any=True , __snake_case : List[str]=False , __snake_case : Dict=False , __snake_case : List[Any]=-1 , __snake_case : List[Any]=False , __snake_case : Tuple="last" , __snake_case : Optional[Any]=True , __snake_case : List[str]="tanh" , __snake_case : Any=0.1 , __snake_case : str=5 , __snake_case : Optional[Any]=5 , __snake_case : List[str]=5 , __snake_case : str=1 , __snake_case : Dict=2 , **__snake_case : Dict , ) -> Optional[int]:
__magic_name__: Dict = vocab_size
__magic_name__: List[Any] = d_model
__magic_name__: int = n_layer
__magic_name__: List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
__magic_name__: str = d_model // n_head
__magic_name__: Optional[Any] = ff_activation
__magic_name__: Tuple = d_inner
__magic_name__: int = untie_r
__magic_name__: Optional[Any] = attn_type
__magic_name__: str = initializer_range
__magic_name__: List[Any] = layer_norm_eps
__magic_name__: Union[str, Any] = dropout
__magic_name__: List[Any] = mem_len
__magic_name__: str = reuse_len
__magic_name__: int = bi_data
__magic_name__: Optional[int] = clamp_len
__magic_name__: str = same_length
__magic_name__: Tuple = summary_type
__magic_name__: List[Any] = summary_use_proj
__magic_name__: Union[str, Any] = summary_activation
__magic_name__: Tuple = summary_last_dropout
__magic_name__: Union[str, Any] = start_n_top
__magic_name__: Optional[int] = end_n_top
__magic_name__: Tuple = bos_token_id
__magic_name__: int = pad_token_id
__magic_name__: List[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , __snake_case , )
__magic_name__: int = kwargs["""use_cache"""]
__magic_name__: str = use_mems_eval
__magic_name__: List[str] = use_mems_train
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def lowerCamelCase__ ( self : Dict ) -> Any:
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] ) -> str:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 96 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
    __UpperCamelCase = argparse.ArgumentParser(description='Simple example of a training script with a performance lower-bound check.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, training will raise an error when the metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 1 | 0 |
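# The training loop above scales the loss by gradient_accumulation_steps and
# only steps the optimizer every N micro-batches; here is a minimal
# pure-PyTorch sketch of that pattern on a toy model (model, data, and the
# step-on-(step+1) convention are all illustrative, not the script's exact code).
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

for step in range(8):
    batch = torch.randn(2, 4)
    loss = model(batch).pow(2).mean() / gradient_accumulation_steps
    loss.backward()  # gradients accumulate across micro-batches
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()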
from __future__ import annotations
def a ( snake_case__: list[list[int]] ):
'''simple docstring'''
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(snake_case__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(snake_case__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 |
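# The same DP restated under a runnable name so the classic 3x3 example can
# be checked: the cheapest monotone (right/down) path costs 1+3+1+1+1 = 7.
# Note the routine mutates its input matrix, like the row above.
def min_path_sum(matrix: list[list[int]]) -> int:
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7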
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Tuple = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 98 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 0 |
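# The brute-force sums above have closed forms: sum(1..n) = n(n+1)/2 and
# sum(i^2) = n(n+1)(2n+1)/6, giving an O(1) answer to cross-check (2640 is
# the known value for n = 10).
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640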
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , key=lambda _lowercase : x[column] )
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
for j in range(max(0 , i - 6 ) , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
__UpperCamelCase = points_counts // 2
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase = min(_lowercase , _lowercase )
__UpperCamelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
__UpperCamelCase = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = column_based_sort(_lowercase , column=0 )
__UpperCamelCase = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 1 | 0 |
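# A brute-force O(n^2) reference for the divide-and-conquer routine above;
# useful for cross-checking on small inputs. Points reused from the row's
# demo; the closest pair there is (2, 3)-(3, 4) at distance sqrt(2).
from itertools import combinations
from math import dist

def closest_pair_brute_force(points: list[tuple[int, int]]) -> float:
    return min(dist(p, q) for p, q in combinations(points, 2))

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
assert abs(closest_pair_brute_force(points) - 2**0.5) < 1e-12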
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_video_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(A_ )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TimesformerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def lowercase_ ( self ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ = self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) )
SCREAMING_SNAKE_CASE__ = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) )
SCREAMING_SNAKE_CASE__ = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ = len(A_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + 1 , len(A_ ) )
SCREAMING_SNAKE_CASE__ = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
SCREAMING_SNAKE_CASE__ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) )
SCREAMING_SNAKE_CASE__ = outputs.hidden_states
SCREAMING_SNAKE_CASE__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A_ ) , A_ )
SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(A_ , A_ , A_ )
def prepare_video():
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self ):
        '''simple docstring'''
        model = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
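# Quick arithmetic behind the integration test above (illustrative only): the
# facebook/timesformer-base-finetuned-k400 checkpoint uses image_size=224,
# patch_size=16 and 8 sampled frames, so each clip becomes
# 8 * (224 // 16) ** 2 + 1 = 1569 tokens, matching the tester's seq_length formula.
_num_patches_per_frame = (224 // 16) ** 2  # 196 patches per frame
_seq_length = 8 * _num_patches_per_frame + 1  # 1569, including the CLS token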
| 100 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    model_type = """bert"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
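# Usage sketch (illustrative; `demo_config`/`demo_onnx_config` are not part of
# this file): instantiate the config and inspect the dynamic axes an ONNX export
# would declare.
if __name__ == "__main__":
    demo_config = BertConfig()
    demo_onnx_config = BertOnnxConfig(demo_config)
    print(demo_onnx_config.inputs)  # input_ids / attention_mask / token_type_ids axes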
| 1 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def get_qformer_tokenizer(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : Any = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : str = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(images=lowerCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = qformer_tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE_ : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
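# Illustrative sketch (mirrors the tests above; not itself an original test case):
# a single processor call fans out to both tokenizers and the image processor,
# which is why the asserted key list carries two sets of input_ids/attention_mask.
def _demo_instructblip_keys(processor, image):
    inputs = processor(text='lower newer', images=image)
    return sorted(inputs.keys())  # attention_mask, input_ids, pixel_values,
                                  # qformer_attention_mask, qformer_input_ids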
| 101 |
def excel_title_to_column(column_title: str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
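# The inverse direction (illustrative companion; not in the original module):
# turn a 1-based column number back into its Excel title.
def excel_column_to_title(column_number: int) -> str:
    assert column_number > 0
    title = ""
    while column_number > 0:
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert excel_column_to_title(28) == "AB"  # round-trips with the function above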
| 1 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.0_84_97,
"""b""": 0.0_14_92,
"""c""": 0.0_22_02,
"""d""": 0.0_42_53,
"""e""": 0.1_11_62,
"""f""": 0.0_22_28,
"""g""": 0.0_20_15,
"""h""": 0.0_60_94,
"""i""": 0.0_75_46,
"""j""": 0.0_01_53,
"""k""": 0.0_12_92,
"""l""": 0.0_40_25,
"""m""": 0.0_24_06,
"""n""": 0.0_67_49,
"""o""": 0.0_75_07,
"""p""": 0.0_19_29,
"""q""": 0.0_00_95,
"""r""": 0.0_75_87,
"""s""": 0.0_63_27,
"""t""": 0.0_93_56,
"""u""": 0.0_27_58,
"""v""": 0.0_09_78,
"""w""": 0.0_25_60,
"""x""": 0.0_01_50,
"""y""": 0.0_19_94,
"""z""": 0.0_00_77,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
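# Example usage (illustrative): crack a Caesar shift of 3 applied to lowercase text.
if __name__ == "__main__":
    demo_ciphertext = "phvvdjh"  # 'message' shifted by +3
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared(demo_ciphertext)
    # expected shift 3 and plaintext 'message'; very short texts can fool the statistic
    print(shift, decoded)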
| 102 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id ):
    """simple docstring"""
    image_size = 3_64 if 'coco' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''' )
        hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
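# Example invocation (illustrative; the script filename is assumed, the flags are
# the ones defined above):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub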
| 1 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self , equal_resolution=False , numpify=False , torchify=False ):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
    @property
    def image_processor_dict(self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_snake_case = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
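# Minimal usage sketch (illustrative, outside the test suite; `_demo_chinese_clip`
# is not part of this file): the processor resizes, center-crops and normalizes,
# returning a batched (1, 3, H, W) tensor for a single PIL image.
def _demo_chinese_clip(image):
    image_processing = ChineseCLIPImageProcessor(do_center_crop=True)
    return image_processing(image, return_tensors='pt').pixel_values.shape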
| 103 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]:
__UpperCamelCase = np.argmax(_lowercase , axis=2 )
__UpperCamelCase, __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(_lowercase )]
__UpperCamelCase = [[] for _ in range(_lowercase )]
for i in range(_lowercase ):
for j in range(_lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _mp_fn(index) -> None:
    # For xla_spawn (TPUs): each spawned process simply re-enters main()
    main()
if __name__ == "__main__":
main()
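# Editor's note: a toy walk-through of align_predictions (illustrative values,
# not part of the original script):
#
#     import numpy as np
#     label_map = {0: "O", 1: "B-PER", 2: "I-PER"}               # hypothetical labels
#     logits = np.array([[[0.9, 0.05, 0.05], [0.1, 0.8, 0.1]]])  # (batch=1, seq=2, labels=3)
#     label_ids = np.array([[0, -100]])                          # -100 == ignore_index
#     # np.argmax gives preds [[0, 1]]; position 1 is skipped because its label
#     # is -100, so preds_list == [["O"]] and out_label_list == [["O"]].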
| 1 | 0 |
"""simple docstring"""
class OverFlowError(Exception):
    """simple docstring"""
    pass
class UnderFlowError(Exception):
    """simple docstring"""
    pass
class FixedPriorityQueue:
    """simple docstring"""
    def __init__(self) -> None:
        # One FIFO list per priority level; index 0 is the highest priority
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        # Serve the highest-priority non-empty queue first
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self) -> str:
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """simple docstring"""
    def __init__(self) -> None:
        self.queue: list[int] = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            # The smallest element is treated as the highest priority
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0, 10 )
fpq.enqueue(1, 70 )
fpq.enqueue(0, 100 )
fpq.enqueue(2, 1 )
fpq.enqueue(2, 5 )
fpq.enqueue(1, 7 )
fpq.enqueue(2, 4 )
fpq.enqueue(1, 64 )
fpq.enqueue(0, 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
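# Editor's note: ElementPriorityQueue.dequeue above scans for the minimum in
# O(n). A heapq-based variant (a sketch, not part of the original file) gives
# O(log n) operations with the same observable behaviour:
import heapq

class HeapPriorityQueue:
    def __init__(self) -> None:
        self.heap: list[int] = []
    def enqueue(self, data: int) -> None:
        heapq.heappush(self.heap, data)
    def dequeue(self) -> int:
        if not self.heap:
            raise UnderFlowError("The queue is empty")
        return heapq.heappop(self.heap)  # always the smallest element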
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 104 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs) -> None:
    """solves the multi-process interleaved print problem"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
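# Editor's note: an optional extra check, added as a sketch (not part of the
# original script). A SUM all_reduce of ones must equal the world size on
# every rank; asserting it catches silent reduction errors, not just hangs.
t = torch.ones(1).to(device)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == dist.get_world_size(), f"bad all_reduce result on {gpu}: {t.item()}"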
| 1 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def snake_case ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def snake_case ( self ):
import PIL.Image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case__ ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE_ : Optional[int] = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case__ )
self.assertFalse(kwargs['optimize_list_casting'] )
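# Editor's note (illustrative, not part of the original tests): the contract
# exercised above, in two lines — assuming `pa.intaa()` in this file stands
# for pyarrow's int64 type:
#
#     pa.array(TypedSequence(["foo"], type=Value("int64")))      # raises: hard cast
#     pa.array(TypedSequence(["foo"], try_type=Value("int64")))  # silently falls back to string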
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = pa.BufferReader(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , pa.Buffer ) else pa.memory_map(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = pa.ipc.open_stream(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : Any = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ , schema=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ : str = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : int = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCamelCase_ , features=lowerCamelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE_ : List[str] = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ : Tuple = pa.ipc.open_stream(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : pa.Table = f.read_all()
SCREAMING_SNAKE_CASE_ : int = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCamelCase_ )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ , hash_salt='split_name' , check_duplicates=lowerCamelCase_ , ) as writer:
with pytest.raises(lowerCamelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ , hash_salt='split_name' , check_duplicates=lowerCamelCase_ , ) as writer:
with pytest.raises(lowerCamelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=10 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=10 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ , hash_salt='split_name' , check_duplicates=lowerCamelCase_ , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : int = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ , schema=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ : str = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : int = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ , schema=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : Optional[int] = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ , schema=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ : Any = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Optional[int] = {'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(lowerCamelCase_ , 'test.arrow' )
with ArrowWriter(path=lowerCamelCase_ , schema=pa.schema(lowerCamelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(lowerCamelCase_ , 1 )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if pa.types.is_list(lowerCamelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
if isinstance(lst[0] , lowerCamelCase_ ):
change_first_primitive_element_in_list(lst[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : int = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = pa.array(TypedSequence(lowerCamelCase_ , optimized_int_type=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = pa.array(OptimizedTypedSequence(lowerCamelCase_ , col=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pa.array(OptimizedTypedSequence(lowerCamelCase_ , col=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCamelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 'mock://dataset-train.arrow'
with ArrowWriter(path=lowerCamelCase_ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowerCamelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCamelCase_ )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(stream=lowerCamelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ : pa.Table = pq.read_table(lowerCamelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ) -> Union[str, Any]:
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE_ : Tuple = str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowerCamelCase_ , format='png' )
SCREAMING_SNAKE_CASE_ : Optional[int] = pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCamelCase_ , features=Features({'image': Image()} ) , embed_local_files=lowerCamelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE_ : Tuple = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ : pa.Table = pq.read_table(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , lowerCamelCase_ )
with open(lowerCamelCase_ , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = pa.schema([pa.field('col_1' , pa.string() , nullable=lowerCamelCase_ )] )
SCREAMING_SNAKE_CASE_ : List[Any] = pa.BufferOutputStream()
with ArrowWriter(stream=lowerCamelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCamelCase_ )
assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
| 105 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config, items) -> None:
    """simple docstring"""
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config) -> None:
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory, monkeypatch) -> Any:
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output() -> None:
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch) -> None:
    """simple docstring"""
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch) -> None:
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 1 | 0 |
demo_graph: dict[str, list[str]] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    '''simple docstring'''
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
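# Editor's note: queue.pop(0) above is O(n) per dequeue. A drop-in improvement
# (a sketch, not part of the original file) is collections.deque, whose
# popleft() is O(1) while preserving the same FIFO traversal order:
#
#     from collections import deque
#     queue = deque([[start]])
#     ...
#     path = queue.popleft()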
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 106 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 0 |
'''simple docstring'''
class TrieNode:
    """simple docstring"""
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), 'works!' if passes else 'doesn\'t work :(')
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results('Testing trie functionality', test_trie())
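# Editor's note: a small illustrative extension (not part of the original
# file): gather every stored word into a list instead of printing, reusing
# the same depth-first walk as print_words above.
def collect_words(node: TrieNode, word: str = "") -> list[str]:
    words = []
    if node.is_leaf:
        words.append(word)
    for key, child in node.nodes.items():
        words.extend(collect_words(child, word + key))
    return words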
if __name__ == "__main__":
main()
| 107 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
args = parser.parse_args()
txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 1 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""")
    return 0.5 * mass * abs(velocity) * abs(velocity)
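# Editor's note: quick sanity checks of KE = 0.5 * m * v**2 (values chosen
# arbitrarily for illustration):
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(2, -3) == 9.0  # speed enters as |v|, direction is ignored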
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
| 108 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = BioGptTokenizer
__UpperCamelCase : Optional[Any] = False
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) )
with open(self.merges_file ,"""w""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = """lower newer"""
return input_text, output_text
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer(self.vocab_file ,self.merges_file )
__SCREAMING_SNAKE_CASE = """lower"""
__SCREAMING_SNAKE_CASE = ["""low""", """er</w>"""]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokens + ["""<unk>"""]
__SCREAMING_SNAKE_CASE = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ,lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 109 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__snake_case = '''
{0} = None
'''
__snake_case = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__snake_case = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = _re_backend.findall(_lowercase )
if len(_lowercase ) == 0:
return None
return "_and_".join(_lowercase )
def _A ( ) -> Tuple:
"""simple docstring"""
with open(os.path.join(_lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__UpperCamelCase = 0
__UpperCamelCase = {}
# Go through the end of the file
while line_index < len(_lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__UpperCamelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__UpperCamelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(_lowercase ) and len(lines[line_index] ) > 1:
__UpperCamelCase = lines[line_index]
__UpperCamelCase = _re_single_line_import.search(_lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_lowercase ) > 0:
__UpperCamelCase = objects
else:
line_index += 1
return backend_specific_objects
def _A ( _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(_lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(_lowercase , _lowercase )
else:
return DUMMY_CLASS.format(_lowercase , _lowercase )
def _A ( _lowercase=None ) -> Optional[Any]:
"""simple docstring"""
if backend_specific_objects is None:
__UpperCamelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__UpperCamelCase = {}
for backend, objects in backend_specific_objects.items():
__UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
__UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] )
__UpperCamelCase = dummy_file
return dummy_files
def _A ( _lowercase=False ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__UpperCamelCase = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__UpperCamelCase = os.path.join(_lowercase , 'utils' )
__UpperCamelCase = {
backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' )
for backend in dummy_files.keys()
}
__UpperCamelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_lowercase ):
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.read()
else:
__UpperCamelCase = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main '''
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` '''
'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
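# Editor's note: for illustration only — given an object name "UNet2DModel"
# (a made-up example) guarded by the "torch" backend, the DUMMY_CLASS template
# above expands to a stub like this:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])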
| 1 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a ( unittest.TestCase ):
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = parent
def __snake_case ( self ):
return {}
def lowerCamelCase ( ):
UpperCAmelCase__ : Dict = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
UpperCAmelCase__ : Optional[int] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class a ( lowercase , unittest.TestCase ):
UpperCamelCase : str = MarkupLMFeatureExtractor if is_bsa_available() else None
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = MarkupLMFeatureExtractionTester(self )
@property
def __snake_case ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __snake_case ( self ):
# Initialize feature_extractor
UpperCAmelCase__ : Any = self.feature_extraction_class()
# Test not batched input
UpperCAmelCase__ : Tuple = get_html_strings()[0]
UpperCAmelCase__ : Optional[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
UpperCAmelCase__ : int = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
# Test batched
UpperCAmelCase__ : List[str] = get_html_strings()
UpperCAmelCase__ : List[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = expected_nodes + [['My First Heading', 'My first paragraph.']]
UpperCAmelCase__ : Dict = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
| 110 |
import string
def decrypt(message: str) -> None:
    """simple docstring"""
    # Try every possible key and print each candidate plaintext
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''')
def main() -> None:
    """simple docstring"""
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
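# Editor's note: the matching forward direction, as a sketch (not part of the
# original file). Encrypting with some key and then running the brute-force
# loop above will show the plaintext at that same key index.
def encrypt(message: str, key: int) -> str:
    translated = ''
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            translated = translated + string.ascii_uppercase[num]
        else:
            translated = translated + symbol
    return translated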
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 0 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # rescale every value into the closed interval [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
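# Editor's note: quick usage examples with made-up data (standardization is
# defined just below):
#   normalization([2, 4, 6])    -> [0.0, 0.5, 1.0]
#   standardization([2, 4, 6])  -> [-1.0, 0.0, 1.0]   (sample stdev = 2)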
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # shift to zero mean and scale to unit (sample) variance
    return [round((x - mu) / sigma, ndigits) for x in data]
| 141 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
__UpperCamelCase = np.ones((64, 64),dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
| 1 | 0 |
'''simple docstring'''
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)
def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
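# Editor's note: quick spot checks (illustrative):
assert is_pangram()  # the default sentence uses every letter
assert not is_pangram('hello world')
assert is_pangram_fastest('Pack my box with five dozen liquor jugs')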
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 329 |
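# Added sanity check (not part of the original module): all three
# implementations above should agree on any input.
def _cross_check(sentence: str) -> None:
    assert is_pangram(sentence) == is_pangram_faster(sentence) == is_pangram_fastest(sentence)


if __name__ == "__main__":
    _cross_check("The quick brown fox jumps over the lazy dog")
    _cross_check("Definitely not a pangram")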
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f'Node({self.data})'


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return '->'.join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercises the basic LinkedList operations with integer data."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercises the LinkedList with mixed data types, including Node instances and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
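# Added non-interactive demo (not part of the original file): exercises the
# list without the input() prompts that main() relies on.
def _quick_demo() -> None:
    ll = LinkedList()
    for value in (3, 1, 4, 1, 5):
        ll.insert_tail(value)
    assert str(ll) == '3->1->4->1->5'
    ll.reverse()
    assert str(ll) == '5->1->4->1->3'
    assert ll.delete_head() == 5 and len(ll) == 4


if __name__ == "__main__":
    _quick_demo()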
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
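# Added context (not in the original script): `accelerator.no_sync(model)` is
# the wrapper exercised by the tests below. Under multi-process training it
# suppresses the gradient all-reduce so grads accumulate locally; a typical
# manual accumulation loop therefore looks like:
#
#     with accelerator.no_sync(ddp_model):
#         loss.backward()          # grads accumulate locally, no cross-process sync
#     loss.backward()              # final micro-batch outside the context syncs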
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
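# Added illustration (my addition, not part of the original script): the
# canonical loop that test_gradient_accumulation above verifies. Inside
# `accelerator.accumulate`, gradient synchronization is suppressed until the
# configured number of micro-batches has been seen; for optimizers that went
# through `accelerator.prepare`, step()/zero_grad() are likewise skipped on the
# intermediate micro-batches.
def _accumulate_loop_sketch(accelerator, model, optimizer, dataloader):
    # Assumes model/optimizer/dataloader were returned by accelerator.prepare
    # and that each batch is an (input, target) pair -- illustrative only.
    model.train()
    for input, target in dataloader:
        with accelerator.accumulate(model):
            output = model(input)
            loss = F.mse_loss(output, target.to(output.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()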
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()

        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask

            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {'accuracy': accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        train_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        val_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy='steps',
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
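# Added note (not part of the original test): the -100 substitution above works
# because PyTorch's cross entropy ignores targets equal to ignore_index
# (default -100), so padded label positions contribute nothing to the loss.
def _ignore_index_demo():
    import torch

    logits = torch.randn(3, 5)
    labels_all = torch.tensor([1, 4, 2])
    labels_masked = torch.tensor([1, -100, 2])  # middle position is ignored
    full = torch.nn.functional.cross_entropy(logits, labels_all)
    masked = torch.nn.functional.cross_entropy(logits, labels_masked)
    return full, masked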
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes the given word into five-character A/B groups using the table above."""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == ' ':
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    """Decodes a string of A/B groups (five characters per letter) back into text."""
    if set(coded) - {'A', 'B', ' '} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += ' '
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
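# Added round-trip check (not part of the original module): decode_dict inverts
# encode_dict, so decoding an encoded message recovers the lowercased input.
def _round_trip(message: str) -> None:
    assert decode(encode(message)) == message.lower()


if __name__ == "__main__":
    _round_trip('hello world')
    _round_trip('Attack at dawn')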
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name='bert-base-cased'):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        performance_metric[f'epoch-{epoch}'] = eval_metric['accuracy']

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
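# Added usage note (not part of the original script; the config file name is a
# placeholder): this module is meant to be run through the Accelerate launcher
# with a DeepSpeed config, e.g.
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir results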
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of 32 chars to little endian."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to a little-endian hex string."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string, pads it, and appends the length."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b'1'
    while len(bit_string) % 512 != 448:
        bit_string += b'0'
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks and yields each as 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Computes the bitwise NOT of the given 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += '1' if c == '0' else '0'
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two 32-bit integers, wrapping around 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of the given 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 digest of the given message."""
    # Convert the message to a bit string, add padding and append the length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
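# Added cross-check (not part of the original module): the pure-Python digest
# should match hashlib's reference implementation byte for byte.
if __name__ == "__main__":
    import hashlib

    for sample in (b"", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")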
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if ' ' not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
_lowercase = self.ta_base_tokenizer
_lowercase = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] ,batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase = self.ta_base_tokenizer
_lowercase = 'Unicode €.'
_lowercase = tokenizer(A_ )
_lowercase = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] ,A_ )
# decoding
_lowercase = tokenizer.decode(A_ )
self.assertEqual(A_ ,'Unicode €.</s>' )
_lowercase = tokenizer('e è é ê ë' )
_lowercase = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] ,A_ )
# decoding
_lowercase = tokenizer.decode(A_ )
self.assertEqual(A_ ,'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) ,'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Any ) -> str:
_lowercase = self.ta_base_tokenizer
_lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase = tokenizer(A_ ,padding=A_ ,return_tensors=A_ )
self.assertIsInstance(A_ ,A_ )
if FRAMEWORK != "jax":
_lowercase = list(batch.input_ids.numpy()[0] )
else:
_lowercase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(A_ ,A_ )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase = self.ta_base_tokenizer
_lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase = tokenizer(A_ ,padding=A_ ,return_tensors=A_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' ,A_ )
self.assertIn('attention_mask' ,A_ )
self.assertNotIn('decoder_input_ids' ,A_ )
self.assertNotIn('decoder_attention_mask' ,A_ )
def __UpperCAmelCase ( self : Dict ) -> Dict:
_lowercase = self.ta_base_tokenizer
_lowercase = [
'Summary of the text.',
'Another summary.',
]
_lowercase = tokenizer(
text_target=A_ ,max_length=32 ,padding='max_length' ,truncation=A_ ,return_tensors=A_ )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = self.ta_base_tokenizer
_lowercase = ['A long paragraph for summarization. </s>']
_lowercase = ['Summary of the text. </s>']
# fmt: off
_lowercase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase = tokenizer(A_ ,text_target=A_ )
self.assertEqual(A_ ,batch['input_ids'][0] )
self.assertEqual(A_ ,batch['labels'][0] )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
_lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
_lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase = tempfile.mkdtemp()
_lowercase = ' He is very happy, UNwant\u00E9d,running'
_lowercase = tokenizer.encode(A_ ,add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
_lowercase = tokenizer.__class__.from_pretrained(A_ )
_lowercase = after_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
shutil.rmtree(A_ )
_lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase = tempfile.mkdtemp()
_lowercase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase = tokenizer.encode(A_ ,add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
_lowercase = tokenizer.__class__.from_pretrained(A_ )
_lowercase = after_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
self.assertIn('new_additional_special_token' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
_lowercase = tokenizer.__class__.from_pretrained(A_ ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(A_ )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
_lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A_ )
with open(os.path.join(A_ ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
_lowercase = json.load(A_ )
with open(os.path.join(A_ ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
_lowercase = json.load(A_ )
_lowercase = [F"""<extra_id_{i}>""" for i in range(125 )]
_lowercase = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(A_ ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(A_ ,A_ )
with open(os.path.join(A_ ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(A_ ,A_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase = tokenizer_class.from_pretrained(
A_ ,)
self.assertIn(
'an_additional_special_token' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' ,lstrip=A_ )]
_lowercase = tokenizer_class.from_pretrained(
A_ ,additional_special_tokens=A_ ,)
self.assertIn('a_new_additional_special_token' ,tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) ,)
def __UpperCAmelCase ( self : str ) -> str:
_lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A_ )
_lowercase = tokenizer_class.from_pretrained(A_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
pass
def __UpperCAmelCase ( self : Dict ) -> Dict:
pass
def __UpperCAmelCase ( self : Any ) -> str:
pass
def __UpperCAmelCase ( self : Tuple ) -> str:
pass
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
_lowercase = self.get_tokenizers(fast=A_ ,do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase = tokenizer.convert_tokens_to_string(A_ )
self.assertIsInstance(A_ ,A_ )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
_lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowercase = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase = 0
_lowercase = tokenizer.convert_ids_to_tokens(
A_ ,skip_special_tokens=A_ )
for attr in attributes_list:
setattr(A_ ,attr + '_id' ,A_ )
self.assertEqual(getattr(A_ ,A_ ) ,A_ )
self.assertEqual(getattr(A_ ,attr + '_id' ) ,A_ )
setattr(A_ ,attr + '_id' ,A_ )
self.assertEqual(getattr(A_ ,A_ ) ,A_ )
self.assertEqual(getattr(A_ ,attr + '_id' ) ,A_ )
setattr(A_ ,'additional_special_tokens_ids' ,[] )
self.assertListEqual(getattr(A_ ,'additional_special_tokens' ) ,[] )
self.assertListEqual(getattr(A_ ,'additional_special_tokens_ids' ) ,[] )
setattr(A_ ,'additional_special_tokens_ids' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(A_ ,'additional_special_tokens' ) ,[token_to_test_setters] )
                self.assertListEqual(getattr(A_ ,'additional_special_tokens_ids' ) ,[token_id_to_test_setters] )
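# Added intuition (not part of the original tests): ByT5 token ids are just
# utf-8 bytes shifted by the 3 leading special tokens (pad=0, eos=1, unk=2),
# which is why 'U' (85) shows up as 88 in the expected ids above.
if __name__ == "__main__":
    tok = ByTaTokenizer()
    assert tok("hi").input_ids[:2] == [ord("h") + 3, ord("i") + 3]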
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v,getattr(new_feature_extractor,k ) )
        # Reset repo
        delete_repo(token=self._token,repo_id='test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir,repo_id='test-feature-extractor',push_to_hub=True,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v,getattr(new_feature_extractor,k ) )
    def test_push_to_hub_in_organization( self ):
'''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v,getattr(new_feature_extractor,k ) )
        # Reset repo
        delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir,repo_id='valid_org/test-feature-extractor-org',push_to_hub=True,use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v,getattr(new_feature_extractor,k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=True )
        # Can't make an isinstance check because new_feature_extractor comes from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''prophetnet.tokenizer'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def load_vocab( vocab_file ):
    '''Loads a vocabulary file into an ordered dictionary mapping token -> index.'''
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
return vocab
class XLMProphetNetTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="[SEP]" , __A="[SEP]" , __A="[SEP]" , __A="[UNK]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A = None , **__A , ):
"""simple docstring"""
lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase : Union[str, Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
lowerCamelCase : Tuple = F"""[unused{i}]"""
lowerCamelCase : Optional[int] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
def __getstate__( self ):
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
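# A minimal usage sketch (an assumption for illustration, not part of the original file; it expects a
# local SentencePiece model saved as `prophetnet.tokenizer`):
#     tokenizer = XLMProphetNetTokenizer("prophetnet.tokenizer")
#     ids = tokenizer.convert_tokens_to_ids(tokenizer._tokenize("Hello world"))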
| 340 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator , batch_size = 16 , model_name = "bert-base-cased" ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=1_28 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
def training_function( config , args ):
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, training will throw an error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 1 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 4 , max_size=3_2 * 6 , num_labels=4 , mask_feature_size=3_2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
        comm_check_on_output(result )
        result = model(
            pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_maskformer_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _lowercase ( self : Tuple ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _lowercase ( self : Tuple ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _lowercase ( self : Any ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _lowercase ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def _lowercase ( self : List[str] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Any ):
pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _lowercase ( self : int ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size) , device=torch_device ),
            "mask_labels": torch.randn((2, 1_0, *size) , device=torch_device ),
            "class_labels": torch.zeros(2 , 1_0 , device=torch_device ).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
        outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config )
        model.to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCamelCase : Dict = 1e-4
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Dict ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _lowercase ( self : Dict ):
snake_case__ : int = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(A_ )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : str = image_processor(A_ , return_tensors="pt" ).to(A_ )
snake_case__ : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(A_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case__ : List[str] = model(**A_ )
snake_case__ : Dict = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_ ) )
snake_case__ : Union[str, Any] = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_ ) )
snake_case__ : Dict = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A_ , atol=A_ ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(A_ )
.eval()
)
snake_case__ : List[str] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : Dict = image_processor(A_ , return_tensors="pt" ).to(A_ )
snake_case__ : Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(A_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case__ : List[Any] = model(**A_ )
# masks_queries_logits
snake_case__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
snake_case__ : Optional[Any] = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A_ , atol=A_ ) )
# class_queries_logits
snake_case__ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ : List[str] = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A_ , atol=A_ ) )
def _lowercase ( self : List[str] ):
snake_case__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(A_ )
.eval()
)
snake_case__ : Dict = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Tuple = image_processor(A_ , return_tensors="pt" ).to(A_ )
snake_case__ : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(A_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**A_ )
# masks_queries_logits
snake_case__ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ : str = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
snake_case__ : Tuple = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A_ , atol=A_ ) )
# class_queries_logits
snake_case__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ : int = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A_ , atol=A_ ) )
def _lowercase ( self : str ):
snake_case__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(A_ )
.eval()
)
snake_case__ : int = self.default_image_processor
snake_case__ : int = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
snake_case__ : Union[str, Any] = inputs["pixel_values"].to(A_ )
snake_case__ : Dict = [el.to(A_ ) for el in inputs["mask_labels"]]
snake_case__ : List[Any] = [el.to(A_ ) for el in inputs["class_labels"]]
with torch.no_grad():
snake_case__ : Tuple = model(**A_ )
self.assertTrue(outputs.loss is not None )
| 297 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (TestCasePlus ):
@slow
@require_torch
    def test_finetune_bert2bert( self ):
'''simple docstring'''
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'],padding='max_length',truncation=True,max_length=512 )
            outputs = tokenizer(batch['highlights'],padding='max_length',truncation=True,max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids,skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids,skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,batched=True,batch_size=batch_size,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,batched=True,batch_size=batch_size,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,per_device_train_batch_size=batch_size,per_device_eval_batch_size=batch_size,predict_with_generate=True,evaluation_strategy='steps',do_train=True,do_eval=True,warmup_steps=0,eval_steps=2,logging_steps=2,)
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert,args=training_args,compute_metrics=_compute_metrics,train_dataset=train_dataset,eval_dataset=val_dataset,tokenizer=tokenizer,)
# start training
trainer.train()
| 1 | 0 |
'''simple docstring'''
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution( n = N ):
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
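# Sanity check: assuming the 1000-digit constant above is intact, this is Project Euler #8 and
# solution() should return 23514624000 for the 13-digit window.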
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 |
def solution( n = 1_00 ) -> int:
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
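# Worked example: for n = 100, (1 + 2 + ... + 100)**2 = 5050**2 = 25502500 and the sum of squares is
# 338350, so solution() returns 25502500 - 338350 = 25164150.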
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
def combination_sum_iv( n , array , target ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations( target ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
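# Note: the naive recursion above re-solves the same sub-targets repeatedly, so it runs in exponential
# time; the two variants below memoize (top-down) and tabulate (bottom-up) in O(target * len(array)).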
def combination_sum_iv_dp_array( n , array , target ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
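# Worked example for the driver below: with array = [1, 2, 5] and target = 5, dp_array fills as
# [1, 1, 2, 3, 5, 9], i.e. 9 ordered combinations sum to 5 (e.g. 5, 1+2+2, 2+1+2, 2+2+1, ...).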
if __name__ == "__main__":
import doctest
doctest.testmod()
a =3
a =5
a =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 530 |
def euclidean_distance_sqr( pointa , pointb ):
    """simple docstring"""
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def column_based_sort( array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
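# A rough complexity note (standard analysis of this divide-and-conquer scheme): the two pre-sorts cost
# O(n log n), and since the strip scan compares each point with at most 6 neighbours, the recursion
# satisfies T(n) = 2T(n/2) + O(n), so the overall search also runs in O(n log n) time.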
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 1 | 0 |
import string
def decrypt( message ):
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main():
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 141 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    model_type = """bert"""
    def __init__( self,vocab_size=3_0522,hidden_size=768,num_hidden_layers=12,num_attention_heads=12,intermediate_size=3072,hidden_act="gelu",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=512,type_vocab_size=2,initializer_range=0.0_2,layer_norm_eps=1E-12,pad_token_id=0,position_embedding_type="absolute",use_cache=True,classifier_dropout=None,**kwargs,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
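# Context note: the mapping above tells the ONNX exporter which axes of input_ids / attention_mask /
# token_type_ids are dynamic (batch plus sequence, and additionally choice for multiple-choice heads);
# all remaining axes are exported with fixed sizes.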
| 1 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "roformer"
    def __init__( self , vocab_size=5_00_00 , embedding_size=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=15_36 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case: Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case: Any = {0: 'batch', 1: 'sequence'}
snake_case: Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 329 |
def excel_title_to_column( column_title ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
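# Worked examples: "A" -> 1, "AB" -> 1 * 26 + 2 = 28, and "ZZ" -> 26 * 26 + 26 = 702.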
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__a = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__a = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__a = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
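# Hedged aside: the metric above is a thin wrapper around nltk's corpus-level
# Google-BLEU (GLEU). Assuming nltk is installed, an equivalent direct call looks
# like this; the two tokenized sentences are illustrative only.
from nltk.translate import gleu_score as _gleu_sketch

_hyp = ['he', 'read', 'the', 'book']
_ref = ['he', 'read', 'a', 'book']
print(round(_gleu_sketch.corpus_gleu(list_of_references=[[_ref]], hypotheses=[_hyp] ), 2 ) )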
| 319 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def create_rename_keys(config):
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
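# Hedged usage sketch for rename_key (toy dict; the value 0 stands in for a tensor):
# renaming is pop-and-reinsert, so the stored object is moved, never copied.
_toy = {'visual_encoder.cls_token': 0}
rename_key(_toy , 'visual_encoder.cls_token' , 'vision_model.embeddings.class_embedding' )
assert _toy == {'vision_model.embeddings.class_embedding': 0}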
def read_in_q_v_bias(state_dict, config):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set the fused qkv bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
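# Hedged aside: the fused qkv projection in the original vision encoder carries no
# learned key bias, which is why the bias is rebuilt as [q_bias, zeros, v_bias]
# above. Toy-sized illustration (the size 4 is arbitrary, not the real hidden dim):
_q_toy = torch.zeros(4 )
_v_toy = torch.ones(4 )
assert torch.cat((_q_toy, torch.zeros_like(_q_toy ), _v_toy) ).shape == (12,)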
def get_blipa_config(model_name, eos_token_id):
    """simple docstring"""
    image_size = 3_64 if 'coco' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | 0 |
'''simple docstring'''
def solution(n = 4000000 ) -> int:
    # sum of the even-valued Fibonacci numbers not exceeding n
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
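# Hedged sanity check: the even Fibonacci numbers not exceeding 10 are 2 and 8,
# so the function should return 10 for n = 10.
assert solution(10 ) == 10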
if __name__ == "__main__":
print(f"""{solution() = }""")
| 538 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
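    # (Hedged aside: positions whose label equals nn.CrossEntropyLoss().ignore_index,
    # i.e. -100, mark special or padded tokens and are skipped above, so the returned
    # per-sentence lists contain only real-token predictions and gold labels.)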
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": fa_score(out_label_list , preds_list ),
        }
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 1 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
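# Hedged aside: the image-variation pipeline assembled above reuses every component
# of the text-to-image UnCLIP checkpoint except its prior; only the CLIP image
# encoder and its feature extractor are new. Loading it back later (path assumed):
#   pipe = UnCLIPImageVariationPipeline.from_pretrained("path/to/dump")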
| 133 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ):
    """solves the multi-process interleaved print problem"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
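# Hedged aside: printflock serializes output across ranks by taking an exclusive
# fcntl lock on this script's own file, so concurrent processes on one node cannot
# interleave their prints. Typical call: printflock(f"{gpu} says hello")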
__snake_case = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__snake_case = torch.device('''cuda''', local_rank)
__snake_case = socket.gethostname()
__snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case = dist.get_rank()
__snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__snake_case : int = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__snake_case : Optional[int] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename ):
    result = {}
    with open(filename ,"r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer ,key ,value ,full_name ,weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer ,attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer ,attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer ,attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict(key ,value ,full_name ,weight_type ,hf_dict ):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
__snake_case : Optional[int] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
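# Hedged aside: PARAM_MAPPING only covers the small adapter module. Its keys are the
# last component of a fairseq parameter name, e.g. a name ending in "W_a" is mapped
# to the HF adapter's "linear_1.weight" by the helpers above and below.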
def load_wavaveca_layer(name ,value ,hf_model=None ,hf_dict=None ):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" ,layer_index )
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key ,value ,name ,weight_type ,hf_dict )
            else:
                set_recursively(hf_model ,mapped_key ,value ,name ,weight_type )
            return is_used
    return is_used
def recursively_load_weights(fairseq_model ,hf_model ,is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name ,value ,feature_extractor ,unused_weights ,hf_model.config.feat_extract_norm == "group" ,)
            is_used = True
        else:
            is_used = load_wavaveca_layer(name ,value ,hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ,is_seq_class=False ):
if config_path is not None:
__lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(_lowercase )
else:
__lowerCAmelCase : str = WavaVecaConfig()
if is_seq_class:
__lowerCAmelCase : Union[str, Any] = read_txt_into_dict(_lowercase )
__lowerCAmelCase : Optional[Any] = idalabel
__lowerCAmelCase : str = WavaVecaForSequenceClassification(_lowercase )
__lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=_lowercase ,return_attention_mask=_lowercase ,)
feature_extractor.save_pretrained(_lowercase )
elif is_finetuned:
if dict_path:
__lowerCAmelCase : Union[str, Any] = Dictionary.load(_lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCAmelCase : Tuple = target_dict.pad_index
__lowerCAmelCase : Dict = target_dict.bos_index
__lowerCAmelCase : int = target_dict.eos_index
__lowerCAmelCase : str = len(target_dict.symbols )
__lowerCAmelCase : Any = os.path.join(_lowercase ,"vocab.json" )
if not os.path.isdir(_lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowercase ) )
return
os.makedirs(_lowercase ,exist_ok=_lowercase )
__lowerCAmelCase : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Any = 1
with open(_lowercase ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(_lowercase ,_lowercase )
__lowerCAmelCase : List[Any] = WavaVecaCTCTokenizer(
_lowercase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=_lowercase ,)
__lowerCAmelCase : Optional[Any] = True if config.feat_extract_norm == "layer" else False
__lowerCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=_lowercase ,return_attention_mask=_lowercase ,)
__lowerCAmelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowercase ,tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
__lowerCAmelCase : List[Any] = WavaVecaForCTC(_lowercase )
else:
__lowerCAmelCase : str = WavaVecaForPreTraining(_lowercase )
if is_finetuned or is_seq_class:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__lowerCAmelCase : str = argparse.Namespace(task="audio_pretraining" )
__lowerCAmelCase : Dict = fairseq.tasks.setup_task(_lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_lowercase )
__lowerCAmelCase : Any = model[0].eval()
recursively_load_weights(_lowercase ,_lowercase ,not is_finetuned )
hf_wavavec.save_pretrained(_lowercase )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__snake_case : int = parser.parse_args()
__snake_case : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 293 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config, items):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def pytest_configure(config):
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory, monkeypatch):
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase = test_hf_cache_home / 'datasets'
__UpperCamelCase = test_hf_cache_home / 'metrics'
__UpperCamelCase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output():
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 1 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A_ :
"""simple docstring"""
    def __init__( self ,conva_get ,size_pa ,bp_numa ,bp_numb ,bp_numc ,rate_w=0.2 ,rate_t=0.2 ) -> None:
        # conva_get: [size, number, step] of the convolution kernel; size_pa: pooling
        # size; bp_numa/bp_numb/bp_numc: unit counts of the three BP layers
        _lowercase = bp_numa
        _lowercase = bp_numb
        _lowercase = bp_numc
_lowercase = conva_get[:2]
_lowercase = conva_get[2]
_lowercase = size_pa
_lowercase = rate_w
_lowercase = rate_t
_lowercase = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_lowercase = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
_lowercase = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
_lowercase = -2 * np.random.rand(self.conva[1] ) + 1
_lowercase = -2 * np.random.rand(self.num_bpa ) + 1
_lowercase = -2 * np.random.rand(self.num_bpa ) + 1
    def save_model( self ,save_path ) -> None:
        model_dic = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
        with open(save_path ,'wb' ) as f:
            pickle.dump(model_dic ,f )
print(F"""Model saved: {save_path}""" )
@classmethod
    def ReadModel( cls ,model_path ):
        with open(model_path ,'rb' ) as f:
            model_dic = pickle.load(f )  # noqa: S301
_lowercase = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
_lowercase = model_dic.get('size_pooling1' )
_lowercase = model_dic.get('num_bp1' )
_lowercase = model_dic.get('num_bp2' )
_lowercase = model_dic.get('num_bp3' )
_lowercase = model_dic.get('rate_weight' )
_lowercase = model_dic.get('rate_thre' )
# create model instance
_lowercase = CNN(A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
# modify model parameter
_lowercase = model_dic.get('w_conv1' )
_lowercase = model_dic.get('wkj' )
_lowercase = model_dic.get('vji' )
_lowercase = model_dic.get('thre_conv1' )
_lowercase = model_dic.get('thre_bp2' )
_lowercase = model_dic.get('thre_bp3' )
return conv_ins
    def sig( self ,x ):
return 1 / (1 + np.exp(-1 * x ))
    def do_round( self ,x ):
        return round(x ,3 )
    def convolute( self ,data ,convs ,w_convs ,thre_convs ,conv_step ):
_lowercase = convs[0]
_lowercase = convs[1]
_lowercase = np.shape(A_ )[0]
# get the data slice of original image data, data_focus
_lowercase = []
for i_focus in range(0 ,size_data - size_conv + 1 ,A_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,A_ ):
_lowercase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(A_ )
# calculate the feature map of every single kernel, and saved as list of matrix
_lowercase = []
_lowercase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(A_ ):
_lowercase = []
for i_focus in range(len(A_ ) ):
_lowercase = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(A_ ) )
_lowercase = np.asmatrix(A_ ).reshape(
A_ ,A_ )
data_featuremap.append(A_ )
        # expanding the data slice to one dimension
        focusa_list = []
        for each_focus in data_focus:
            focusa_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focusa_list )
return focus_list, data_featuremap
    def pooling( self ,featuremaps ,size_pooling ,pooling_type="average_pool" ):
_lowercase = len(featuremaps[0] )
_lowercase = int(size_map / size_pooling )
_lowercase = []
for i_map in range(len(A_ ) ):
_lowercase = featuremaps[i_map]
_lowercase = []
for i_focus in range(0 ,A_ ,A_ ):
for j_focus in range(0 ,A_ ,A_ ):
_lowercase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(A_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(A_ ) )
_lowercase = np.asmatrix(A_ ).reshape(A_ ,A_ )
featuremap_pooled.append(A_ )
return featuremap_pooled
    def _expand( self ,data ):
        # expand a list of 2-D matrices into one flat 1-D array
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 ,shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat( self ,data_mat ):
        # expand a single matrix into a 1 x N matrix
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 ,shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool( self ,out_map ,pd_pool ,num_map ,size_map ,size_pooling ):
        # scatter pooled gradients back to the pre-pooling feature-map size
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conva = np.ones((size_map, size_map) )
            for i in range(0 ,size_map ,size_pooling ):
                for j in range(0 ,size_map ,size_pooling ):
                    pd_conva[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conva = np.multiply(
                pd_conva ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
            pd_all.append(pd_conva )
        return pd_all
    def train( self ,patterns ,datas_train ,datas_teach ,n_repeat ,error_accuracy ,draw_e=bool ):
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(A_ )) )
print((' - - Shape: Teach_Data ', np.shape(A_ )) )
_lowercase = 0
_lowercase = []
_lowercase = 1_0000
while rp < n_repeat and mse >= error_accuracy:
_lowercase = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(A_ ) ):
# print('------------Learning Image: %d--------------'%p)
_lowercase = np.asmatrix(datas_train[p] )
_lowercase = np.asarray(datas_teach[p] )
_lowercase , _lowercase = self.convolute(
A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase = self.pooling(A_ ,self.size_poolinga )
_lowercase = np.shape(A_ )
_lowercase = self._expand(A_ )
_lowercase = data_bp_input
_lowercase = np.dot(A_ ,self.vji.T ) - self.thre_bpa
_lowercase = self.sig(A_ )
_lowercase = np.dot(A_ ,self.wkj.T ) - self.thre_bpa
_lowercase = self.sig(A_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_lowercase = np.multiply(
(data_teach - bp_outa) ,np.multiply(A_ ,(1 - bp_outa) ) )
_lowercase = np.multiply(
np.dot(A_ ,self.wkj ) ,np.multiply(A_ ,(1 - bp_outa) ) )
_lowercase = np.dot(A_ ,self.vji )
_lowercase = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowercase = pd_conva_pooled.T.getA().tolist()
_lowercase = self._calculate_gradient_from_pool(
A_ ,A_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowercase = self._expand_mat(pd_conva_all[k_conv] )
_lowercase = self.rate_weight * np.dot(A_ ,A_ )
_lowercase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowercase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowercase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowercase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowercase = self.thre_bpa - pd_k_all * self.rate_thre
_lowercase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_lowercase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowercase = rp + 1
_lowercase = error_count / patterns
all_mse.append(A_ )
def draw_error():
_lowercase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(A_ ,'+-' )
plt.plot(A_ ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(A_ ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
    def predict( self ,datas_test ):
_lowercase = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(A_ )) )
for p in range(len(A_ ) ):
_lowercase = np.asmatrix(datas_test[p] )
_lowercase , _lowercase = self.convolute(
A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase = self.pooling(A_ ,self.size_poolinga )
_lowercase = self._expand(A_ )
_lowercase = data_bp_input
_lowercase = bp_outa * self.vji.T - self.thre_bpa
_lowercase = self.sig(A_ )
_lowercase = bp_outa * self.wkj.T - self.thre_bpa
_lowercase = self.sig(A_ )
produce_out.extend(bp_outa.getA().tolist() )
_lowercase = [list(map(self.do_round ,A_ ) ) for each in produce_out]
return np.asarray(A_ )
    def convolution( self ,data ):
_lowercase = np.asmatrix(A_ )
_lowercase , _lowercase = self.convolute(
A_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase = self.pooling(A_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass | 67 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
    def get_dummy_inputs( self,device,seed=0 ):
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
    def test_text_guided_v2v( self ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt( self ):
'''simple docstring'''
pass
    def test_progress_bar( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
    def test_two_step_model( self ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 0 |
def solution(limit = 1000000 ):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
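# Hedged sanity check (left as a comment because the float sieve may not be exact
# for tiny limits): phi(2..10) is 1, 2, 2, 4, 2, 6, 4, 6, 4, so solution(10) == 31.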
if __name__ == "__main__":
print(f'''{solution() = }''')
| 340 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 1 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , new_image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(F'''{file_root}.txt''' , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] ):
snake_case__ : Any = []
snake_case__ : Tuple = []
for label_file in glob.glob(os.path.join(_lowercase , "*.txt" ) ):
snake_case__ : Optional[Any] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(_lowercase ) as in_file:
snake_case__ : Dict = in_file.readlines()
snake_case__ : str = os.path.join(_lowercase , F'''{label_name}.jpg''' )
snake_case__ : str = []
for obj_list in obj_lists:
snake_case__ : Tuple = obj_list.rstrip("\n" ).split(" " )
snake_case__ : List[str] = float(obj[1] ) - float(obj[3] ) / 2
snake_case__ : int = float(obj[2] ) - float(obj[4] ) / 2
snake_case__ : Union[str, Any] = float(obj[1] ) + float(obj[3] ) / 2
snake_case__ : Dict = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_lowercase )
labels.append(_lowercase )
return img_paths, labels
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[int] = 0.0 , ):
snake_case__ : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
snake_case__ : Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Tuple = int(scale_x * output_size[1] )
snake_case__ : Optional[Any] = int(scale_y * output_size[0] )
snake_case__ : int = []
snake_case__ : List[str] = []
for i, index in enumerate(_lowercase ):
snake_case__ : Dict = all_img_list[index]
path_list.append(_lowercase )
snake_case__ : Any = all_annos[index]
snake_case__ : Dict = cva.imread(_lowercase )
if i == 0: # top-left
snake_case__ : int = cva.resize(_lowercase , (divid_point_x, divid_point_y) )
snake_case__ : Union[str, Any] = img
for bbox in img_annos:
snake_case__ : Tuple = bbox[1] * scale_x
snake_case__ : Optional[int] = bbox[2] * scale_y
snake_case__ : List[Any] = bbox[3] * scale_x
snake_case__ : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case__ : Union[str, Any] = cva.resize(_lowercase , (output_size[1] - divid_point_x, divid_point_y) )
snake_case__ : Union[str, Any] = img
for bbox in img_annos:
snake_case__ : List[str] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Any = bbox[2] * scale_y
snake_case__ : List[Any] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : str = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case__ : Any = cva.resize(_lowercase , (divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : List[str] = img
for bbox in img_annos:
snake_case__ : Union[str, Any] = bbox[1] * scale_x
snake_case__ : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : Any = bbox[3] * scale_x
snake_case__ : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case__ : Optional[int] = cva.resize(
_lowercase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Optional[int] = img
for bbox in img_annos:
snake_case__ : Tuple = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Any = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : str = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
snake_case__ : List[str] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ : List[str] = ascii_lowercase + digits
return "".join(random.choice(_lowercase ) for _ in range(_lowercase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
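
# The _LazyModule indirection keeps importing this package cheap: nothing from
# modeling_autoformer is loaded until an attribute is first accessed.
# Sketch of the observable behavior (assuming torch and transformers are installed):
#
#     from transformers.models import autoformer   # fast, no torch-heavy import yet
#     model_cls = autoformer.AutoformerModel        # the real import happens here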
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
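
# The tokenizer is purely character-level: _tokenize just iterates the string.
# A sketch, assuming a local vocab.json mapping characters to ids (path hypothetical):
#
#     tokenizer = MgpstrTokenizer(vocab_file='vocab.json')
#     tokenizer._tokenize('abc')   # -> ['a', 'b', 'c']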
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')


DUMMY_CONSTANT = '''
{0} = None
'''

DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''


DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'"{b}"' for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += 'from ..utils import DummyObject, requires_backends\n\n'
        dummy_file += '\n'.join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    '__init__ has new objects.')
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    'to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
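
# Example of what gets generated (a sketch; run from the repo root):
#
#     print(create_dummy_object('UNet2DModel', '["torch"]'))
#
# prints a DummyObject class whose __init__/from_config/from_pretrained all
# defer to requires_backends until torch is actually installed.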
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docstring'''
lowerCamelCase__ =credit_card_number
lowerCamelCase__ =0
lowerCamelCase__ =len(_lowercase ) - 2
for i in range(_lowercase , -1 , -2 ):
# double the value of every second digit
lowerCamelCase__ =int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCamelCase__ =cc_number[:i] + str(_lowercase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_lowercase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowerCamelCase_ ( __lowerCAmelCase ) -> bool:
'''simple docstring'''
lowerCamelCase__ =F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(_lowercase ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(_lowercase ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(_lowercase ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
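
# Equivalent digit-sum formulation of the Luhn rule, added here as a cross-check
# (doubling a digit and subtracting 9 when it exceeds 9 equals summing the
# product's digits). For 4111111111111111 the digits sum to 30, so it passes.
def luhn_check(number: str) -> bool:
    digits = [int(c) for c in reversed(number)]
    total = sum(digits[0::2]) + sum(d * 2 - 9 if d * 2 > 9 else d * 2 for d in digits[1::2])
    return total % 10 == 0


assert luhn_check('4111111111111111') and luhn_validation('4111111111111111')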
import string
def decrypt(message: str) -> None:
    """Brute-force every Caesar shift and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
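
# Because a Caesar key has only 26 possibilities, brute force is instant.
# For example, 'KHOOR' is 'HELLO' shifted by 3, so the Key #3 line recovers it:
#
#     decrypt('KHOOR')   # ... Decryption using Key #3: HELLO ...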
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
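

# The dynamically imported `label_smoothed_nll_loss` is not shown in this file.
# A common (fairseq-style) formulation, as a sketch only -- the actual `utils`
# implementation may differ:
def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # Mix the NLL of the gold token with a uniform prior over the vocabulary.
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss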
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            'block_out_channels': [32, 64],
            'down_block_types': ['DownEncoderBlock2D', 'AttnDownEncoderBlock2D'],
            'in_channels': 3,
            'latent_channels': 4,
            'layers_per_block': 1,
            'norm_num_groups': 8,
            'norm_type': 'spatial',
            'num_vq_embeddings': 12,
            'out_channels': 3,
            'up_block_types': [
                'AttnUpDecoderBlock2D',
                'UpDecoderBlock2D',
            ],
            'vq_embed_dim': 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )

        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f'image.shape {image.shape}')

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy'
        )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png'
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = 'a hat'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type='np',
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'Node({self.data})'


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below `n` that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
print(f"""{solution() = }""")
| 538 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode to Baconian cipher."""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == ' ':
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    """Decode from Baconian cipher."""
    if set(coded) - {'A', 'B', ' '} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += ' '
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
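
# Quick round trip using the tables above (safe to run; verified by hand):
_cipher = encode('hello')
assert _cipher == 'AABBBAABAAABABAABABAABBAB'
assert decode(_cipher) == 'hello'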
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')

    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak model's weights to our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size},
        size={'shortest_edge': config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
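
# After conversion the folder is a regular 🤗 checkpoint. Loading it back
# (a sketch; the path matches --pytorch_dump_folder_path):
#
#     from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor
#
#     processor = MobileNetV1ImageProcessor.from_pretrained('./mobilenet_v1_1.0_224')
#     model = MobileNetV1ForImageClassification.from_pretrained('./mobilenet_v1_1.0_224')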
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a string of 32 characters to little-endian."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('utf-8')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append the bit length."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b'1'
    while len(bit_string) % 512 != 448:
        bit_string += b'0'
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += '1' if c == '0' else '0'
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift`."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of `message`."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
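
# Cross-check against the standard library (assuming hashlib's md5 is available):
# the little-endian hex digest produced above should match hashlib's hexdigest.
import hashlib

_msg = b'The quick brown fox jumps over the lazy dog'
assert md5_me(_msg).decode() == hashlib.md5(_msg).hexdigest()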
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer with NMT, NFKC, extra-space and
    lower-casing normalization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
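
# Training sketch (file path hypothetical; requires the `tokenizers` wheel):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train('corpus.txt', vocab_size=8000)
#     print(tokenizer.encode('Hello world').tokens)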
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Search until the target is reached or the open list empties."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the walkable in-grid neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
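# Design note (a sketch, not part of the original class): re-sorting the open
# list and popping index 0 costs O(n log n) per expansion; a binary heap with
# lazy deletion gives the same result in O(log n) per push/pop. Node.__lt__
# already orders by f_cost, so nodes can be pushed directly.
import heapq


def heap_search(astar: AStar) -> list[TPosition]:
    open_heap = [astar.start]
    closed: set[TPosition] = set()
    while open_heap:
        current = heapq.heappop(open_heap)
        if current.pos == astar.target.pos:
            return astar.retrace_path(current)
        if current.pos in closed:
            continue  # stale duplicate of an already-expanded cell
        closed.add(current.pos)
        for child in astar.get_successors(current):
            heapq.heappush(open_heap, child)
    return [astar.start.pos]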
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run forward and backward searches in lockstep until they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the two half-paths at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 0 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # Closed form for an arithmetic series: S_n = n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))  # 1 + 2 + ... + 10 = 55.0
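# Quick cross-check (a sketch): the closed form should agree with a direct sum.
assert sum_of_series(1, 1, 10) == sum(range(1, 11))  # both give 55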
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FOLDER = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_feature_extractor_is_used_when_hub_is_down(self):
        # A mock response for an HTTP request, emulating the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 1 | 0 |
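# Round-trip sketch of the hub flow exercised by the tests above (assumes a
# logged-in user and that the hypothetical repo name "my-test-feature-extractor"
# is free):
#
#   from transformers import Wav2Vec2FeatureExtractor
#
#   fe = Wav2Vec2FeatureExtractor()  # default configuration
#   fe.push_to_hub("my-test-feature-extractor")
#   fe_again = Wav2Vec2FeatureExtractor.from_pretrained("<user>/my-test-feature-extractor")
#   assert fe.to_dict() == fe_again.to_dict()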
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS,
    terminating a branch when its sum exceeds max_sum or can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
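# Brute-force cross-check of the backtracking search above (a sketch): filter
# every combination directly and compare with the DFS output.
from itertools import combinations

brute_force = [
    combo
    for size in range(len(nums) + 1)
    for combo in combinations(nums, size)
    if sum(combo) == max_sum
]
print(*brute_force)  # expected: (4, 5) and (3, 4, 2), matching the DFS result up to ordering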
| 340 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
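# Why collate_fn branches above (a sketch; running it downloads "bert-base-cased"):
# XLA/TPU recompiles for every new tensor shape, so fixed max_length padding keeps
# a single compiled graph, while GPUs can cheaply pad each batch to its own
# longest member.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased")
#   batch = [tok("short one"), tok("a noticeably longer example sentence")]
#   tok.pad(batch, padding="longest", return_tensors="pt")["input_ids"].shape
#   # -> (2, length of the longer encoding)
#   tok.pad(batch, padding="max_length", max_length=128, return_tensors="pt")["input_ids"].shape
#   # -> (2, 128)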
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer; fall back to the DeepSpeed dummy when the config supplies its own
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 1 | 0 |
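# How a script like the one above is typically run (a sketch; the config file
# name is hypothetical):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir outputs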